[Merge] ~ines-almeida/launchpad:add-amd64-gpu-bos03-builders-qastaging into launchpad:master

Ines Almeida has proposed merging ~ines-almeida/launchpad:add-amd64-gpu-bos03-builders-qastaging into launchpad:master.

Commit message:
vbuilder: Add new amd64-gpu bos03 builders to qastaging

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~ines-almeida/launchpad/+git/launchpad/+merge/466232
diff --git a/.gitignore b/.gitignore
index 82f290a..107515a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -76,3 +76,4 @@ yarn/node_modules
 requirements/combined.txt
 .tox/
 /geckodriver.log
+__pycache__
diff --git a/LICENSE b/LICENSE
index 30edbc5..1373a55 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,5 +1,5 @@
-Launchpad is Copyright 2004 Canonical Ltd.
+Launchpad is Copyright 2004-2019 Canonical Ltd.
 
 Canonical Ltd ("Canonical") distributes the Launchpad source code
 under the GNU Affero General Public License, version 3 ("AGPLv3").
 The full text of this licence is given below.
diff --git a/configs/canonical-is-secgroups-production-ps5.yaml b/configs/canonical-is-secgroups-production-ps5.yaml
new file mode 120000
index 0000000..0b3e62f
--- /dev/null
+++ b/configs/canonical-is-secgroups-production-ps5.yaml
@@ -0,0 +1 @@
+canonical-is-secgroups-production.yaml
\ No newline at end of file
diff --git a/configs/canonical-is-secgroups-production.yaml b/configs/canonical-is-secgroups-production.yaml
new file mode 100644
index 0000000..e1c05df
--- /dev/null
+++ b/configs/canonical-is-secgroups-production.yaml
@@ -0,0 +1,47 @@
+all-units:
+    - bastions-ping
+    - bastions-ssh
+    - icmp
+    - is-prometheus
+    - is-vpn-ssh
+    - nagios
+applications: {}
+rules:
+    bastions-ping:
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "10.131.0.169/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "10.131.2.211/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "91.189.90.46/32"}
+        - {"protocol": "ipv6-icmp", "family": "IPv6", "cidr": "2001:67c:1561:8003::11/128"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "10.130.64.19/32"}
+    bastions-ssh:
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "91.189.90.46/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "10.131.0.169/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "10.130.64.19/32"}
+    icmp:
+        # Since we're allowing ICMPv6, we should allow ICMP in
+        # general, mainly for path MTU and useful for network
+        # troubleshooting. We can control/block on the NGFWs.
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "0.0.0.0/0"}
+        # Don't want to block ICMPv6 otherwise things may break.
+        - {"protocol": "ipv6-icmp", "family": "IPv6", "cidr": "::/0"}
+    is-prometheus:
+        # 3FP
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "91.189.94.59/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "91.189.94.60/32"}
+        - {"protocol": "tcp", "family": "IPv6", "port": 9103, "cidr": "2001:67c:1561:8008::13/128"}
+        - {"protocol": "tcp", "family": "IPv6", "port": 9103, "cidr": "2001:67c:1561:8008::14/128"}
+        # IL3
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "185.125.190.67/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "185.125.190.68/32"}
+        - {"protocol": "tcp", "family": "IPv6", "port": 9103, "cidr": "2620:2d:4000:1::67/128"}
+        - {"protocol": "tcp", "family": "IPv6", "port": 9103, "cidr": "2620:2d:4000:1::68/128"}
+    is-vpn-ssh:
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "10.172.62.0/23"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "10.172.126.0/23"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "10.172.190.0/23"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "10.172.254.0/23"}
+    nagios:
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "10.131.2.211/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.46/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "10.131.2.211/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "91.189.90.46/32"}
diff --git a/lp-builder-proxy/README.md b/lp-builder-proxy/README.md
new file mode 100644
index 0000000..3c86936
--- /dev/null
+++ b/lp-builder-proxy/README.md
@@ -0,0 +1,39 @@
+# Launchpad builder proxy
+
+This spec deploys the Launchpad builder proxy, used to allow limited
+internet access for selected builds in the Launchpad build farm.
+
+You can run it locally using Juju's LXD support and Mojo.  First, configure
+your environment to download payload builds from the output of our Jenkins
+jobs:
+
+    export MOJO_ROOT="$HOME/.local/share/mojo"
+    export MOJO_DOWNLOADER_STORAGE_URL=https://objectstorage.prodstack5.canonical.com/swift/v1/AUTH_1f1e18e10e564f7f81e345b5e4edbf6f
+    export MOJO_DOWNLOADER_CONTAINER_NAME=rutabaga-builds
+
+Then run the spec using Mojo:
+
+    mojo project-new -s focal -c containerless mojo-lp-builder-proxy
+    mojo workspace-new -p mojo-lp-builder-proxy -s focal \
+        --stage lp-builder-proxy/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+    mojo run -p mojo-lp-builder-proxy -s focal \
+        --stage lp-builder-proxy/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+
+You must have python3-requests, python3-yaml, and python3-swiftclient
+installed.
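+
+As a minimal sketch of what the proxy provides (assuming Squid's default
+port of 3128 and placeholder credentials, since real ones are issued
+per-build by rutabaga and expire after `token_ttl_minutes`), a build
+could fetch an allowed URL through it with python3-requests:
+
+    import requests
+
+    # Hypothetical proxy host and credentials, for illustration only.
+    proxy = "http://user:secret@builder-proxy.internal:3128"
+    proxies = {"http": proxy, "https": proxy}
+    # launchpadlibrarian.net is one of the hosts the proxy allows.
+    r = requests.get("http://launchpadlibrarian.net/", proxies=proxies)
+    print(r.status_code)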
diff --git a/lp-builder-proxy/bundle.yaml b/lp-builder-proxy/bundle.yaml
new file mode 100644
index 0000000..33db555
--- /dev/null
+++ b/lp-builder-proxy/bundle.yaml
@@ -0,0 +1,216 @@
+{%- if stage_name == "production" %}
+{%-   set admin_api_username = "launchpad.net" %}
+{%-   set constraints = "root-disk-source=volume" %}
+{%-   set devel = False %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set nagios_context = "lp-prodstack-builder-proxy" %}
+{%-   set nagios_hostgroups = "prodstack-lp" %}
+{%-   set nagios_master = "nagios.ps5.internal" %}
+{%- elif stage_name == "qastaging" %}
+{%-   set admin_api_username = "qastaging.launchpad.net" %}
+{%-   set constraints = "root-disk-source=volume" %}
+{%-   set devel = False %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set nagios_context = "lp-stagingstack-builder-proxy" %}
+{%-   set nagios_hostgroups = "stagingstack-lp" %}
+{%-   set nagios_master = "devops-nagios.ps5.internal" %}
+{%- else %}
+{%-   set admin_api_username = "launchpad.test" %}
+{%-   set constraints = "" %}
+{%-   set devel = True %}
+{%-   set log_hosts_allow = "" %}
+{%-   set nagios_context = "lp-devel-builder-proxy" %}
+{%-   set nagios_hostgroups = "devel-lp" %}
+{#-   The configured nagios_master doesn't have to be real, but it does have
+      to resolve. #}
+{%-   set nagios_master = "localhost" %}
+{%- endif -%}
+series: {{ series }}
+applications:
+  squid-reverseproxy:
+    charm: cs:squid-reverseproxy-20
+    expose: true
+{%- if devel %}
+    num_units: 1
+{%- else %}
+    num_units: 2
+{%- endif %}
+    options:
+      auth_list: |
+        # Deny requests to ports other than 80, 443, 9418.
+        - "!port": [80, 443, 9418]
+          http_access: deny
+        # Deny all except certain HTTP methods.
+        - "!method": [GET, POST, CONNECT, OPTIONS, PROPFIND, REPORT]
+          http_access: deny
+        # Only allow authenticated requests.
+        - "!proxy_auth": ["REQUIRED"]
+        # Allow non-source-restricted hosts on denied networks.  dstdomain
+        # is unsafe.  DO NOT USE DSTDOMAIN.  For URLs containing IP
+        # addresses, Squid will look up the PTR and match it against
+        # dstdomain rules!  This is obviously completely holey.
+        - port: [80]
+          method: [GET]
+          # DSTDOMAIN IS FORBIDDEN.
+          dst: [
+            # launchpadlibrarian.net, a benign host for nagios checks.  Note
+            # that this is a publicly-accessible host that builders can
+            # access directly, so allowing authenticated access to it
+            # without source IP restrictions is safe.
+            91.189.89.228, 91.189.89.229,
+            ]
+          http_access: allow
+        # Allow localhost and builders.
+        - "!src": [
+            127.0.0.0/8,
+{%- if devel %}
+            # Allow all of RFC1918 space for development deployments, in order
+            # to ensure that local builders can use this proxy.  Don't use the
+            # devel stage for real deployments.
+            10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16,
+{%- else %}
+            10.222.144.0/20, 10.222.160.0/20, 10.89.80.0/20, 10.89.144.0/20,
+            10.89.160.0/20, 10.133.0.0/16, 10.134.0.0/17, 10.189.16.0/20,
+            10.189.144.0/20, 10.189.160.0/20, 10.189.64.0/21,
+            10.122.32.0/21, 10.222.36.0/22,
+{%- endif %}
+            ]
+          http_access: deny
+        # Allow certain hosts on denied networks.  dstdomain is unsafe.  DO
+        # NOT USE DSTDOMAIN.  For URLs containing IP addresses, Squid will
+        # look up the PTR and match it against dstdomain rules!  This is
+        # obviously completely holey.
+        - port: [80, 443, 9418]
+          method: [GET, POST, CONNECT, OPTIONS, PROPFIND, REPORT]
+          # DSTDOMAIN IS FORBIDDEN.
+          dst: [
+            # launchpadlibrarian.net, a benign host outside prodstack for
+            # nagios checks.
+            91.189.89.228, 91.189.89.229,
+            # ftpmaster.internal, used by apt during "snapcraft pull".
+            91.189.89.100,
+            # launchpad.net, potentially used by add-apt-repository.
+            91.189.89.222, 91.189.89.223,
+            # keyserver.ubuntu.com, potentially used by add-apt-repository.
+            162.213.33.8, 162.213.33.9,
+            # ppa.launchpad.net, used by apt during "snapcraft pull".
+            91.189.95.85, 91.189.94.85,
+{%- if stage_name == "qastaging" %}
+            # ppa.dogfood.content.paddev.net, used by apt during "snapcraft
+            # pull" on qastaging.
+            91.189.90.138,
+{%- endif %}
+            # bazaar.launchpad.net, potentially used by "snapcraft pull".
+            91.189.95.84,
+            # git.launchpad.net, potentially used by "snapcraft pull".
+            91.189.94.77, 91.189.94.79,
+            # api.launchpad.net/xmlrpc.launchpad.net, used e.g. by bzr.
+            91.189.89.224, 91.189.89.225,
+            # people.canonical.com, used by some developers to host blobs.
+            91.189.89.62,
+            # search.apps.ubuntu.com/api.snapcraft.io, used by snapcraft to
+            # fetch the core snap for builds of classic snaps.
+            91.189.92.19, 91.189.92.20, 91.189.92.38, 91.189.92.39,
+            91.189.92.40, 91.189.92.41,
+            # public.apps.ubuntu.com/storage.snapcraftcontent.com, used by
+            # snapcraft to fetch the core snap for builds of classic snaps.
+            162.213.33.43, 162.213.33.81, 162.213.33.128, 162.213.33.220,
+            # kernel.ubuntu.com, used by some developers to fetch code.
+            91.189.94.216,
+            # gopkg.in, used by some Go-based snap builds.
+            185.125.188.113, 185.125.188.128, 185.125.188.236,
+            # people.ubuntu.com, which serves read-only content for Ubuntu
+            # members.
+            185.125.188.18,
+            # Edge networks, excluding office satellites.  This allows
+            # archive.ubuntu.com and security.ubuntu.com in a way that
+            # should be reasonably safe and doesn't require keeping track of
+            # individual addresses.  See:
+            # https://wiki.canonical.com/InformationInfrastructure/IS/Network
+            # https://wiki.canonical.com/InformationInfrastructure/IS/Network/IPv6/Subnets
+            91.189.88.0/24,
+            91.189.91.0/25,
+            91.189.92.128/25,
+            91.189.93.192/28,
+            "2001:67c:1360:8001::/64",
+            "2001:67c:1360:8c01::/64",
+            "2001:67c:1360:4801::/64",
+            "2001:67c:1560:8001::/64",
+            "2001:67c:1561:8001::/64",
+            "2001:67c:1562::/64",
+            "2001:67c:1562:8001::/64",
+            ]
+          http_access: allow
+        # The following rules are shared with webhooks-proxy:
+        #
+        # Forbid internal networks since a lot of systems assume that our
+        # entire PI space is trustworthy.  Problematic for webhooks to
+        # internal services, but we allow those addresses in the next rule.
+        - dst: [
+            # IANA reserved, special and private networks.
+            0.0.0.0/8, 10.0.0.0/8, 169.254.0.0/16,
+            172.16.0.0/12, 192.168.0.0/16, 127.0.0.0/8,
+            224.0.0.0/4, 240.0.0.0/4,
+            # Canonical networks.
+            91.189.88.0/21, 162.213.32.0/22, 185.125.188.0/22,
+            194.169.254.0/24,
+            "2001:67c:1360::/48", "2001:67c:1560::/46",
+            "2620:2d:4000::/44",
+            ]
+          http_access: deny
+        # IPv6 is not supported in PS4.5, but disallowing it entirely is not
+        # an option as squid will reject anything with an AAAA even if it
+        # has As too.
+        #
+        # The "ipv4" matcher does not work, and "ipv6" is a little too
+        # liberal: within the IPv6 address space we only want 2000::/3.
+        # "0.0.0.0/0" is internally overridden to "all", matching IPv6 too,
+        # so we instead use "0.0.0.0/1" and "128.0.0.0/1" to match IPv4.
+        #
+        # Note also that Squid internally handles IPv4 by using its mapping
+        # into IPv6, so if you deny ::/3 before allowing IPv4 you will have
+        # a bad time.
+        - port: [80, 443, 9418]
+          method: [GET, POST, CONNECT, OPTIONS, PROPFIND, REPORT]
+          dst: [0.0.0.0/1, 128.0.0.0/1, "2000::/3"]
+          http_access: allow
+        # Deny anything with any IP address that has not matched already.
+        - dst: ["::/0"]
+          http_access: deny
+        # Allow anything leftover.  The FQDN probably has no IP address, so
+        # we want to return a DNS error rather than a permission violation.
+        - port: [80, 443, 9418]
+          method: [GET, POST, CONNECT, OPTIONS, PROPFIND, REPORT]
+          http_access: allow
+      dns_v4_first: true
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      port_options: ""
+      wait_for_auth_helper: true
+  rutabaga:
+    charm: {{ charm_dir }}/rutabaga
+    expose: true
+    num_units: 1
+    options:
+      admin_api_username: "{{ admin_api_username }}"
+      admin_api_secret: include-file://{{ local_dir }}/admin-api-secret
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      token_ttl_minutes: 180
+  rutabaga-auth-helper:
+    charm: {{ charm_dir }}/rutabaga-auth-helper
+    options:
+      credentials_ttl_seconds: 1
+  nrpe:
+    charm: cs:nrpe-73
+    options:
+      export_nagios_definitions: true
+      hostgroups: "{{ nagios_hostgroups }}"
+      nagios_host_context: "{{ nagios_context }}"
+      nagios_master: "{{ nagios_master }}"
+relations:
+  - ["rutabaga", "rutabaga-auth-helper"]
+  - ["rutabaga-auth-helper:squid-auth-helper", "squid-reverseproxy:auth-helper"]
+  - ["rutabaga:squid", "squid-reverseproxy"]
+  - ["nrpe", "rutabaga"]
+  - ["nrpe", "squid-reverseproxy:nrpe-external-master"]
diff --git a/lp-builder-proxy/collect-charm-upgrades b/lp-builder-proxy/collect-charm-upgrades
new file mode 100644
index 0000000..21c5164
--- /dev/null
+++ b/lp-builder-proxy/collect-charm-upgrades
@@ -0,0 +1,2 @@
+# Add any charm upgrades which are part of the current deployment.
+# This file can be empty when there are none.
diff --git a/lp-builder-proxy/collect-charms b/lp-builder-proxy/collect-charms
new file mode 100644
index 0000000..d3c7cd0
--- /dev/null
+++ b/lp-builder-proxy/collect-charms
@@ -0,0 +1,2 @@
+rutabaga		git+https://git.launchpad.net/~launchpad/rutabaga/+git/charm-build-rutabaga;revno=build/6552986a9bb1a9a67cee8cc53f82e9a2e6aebe64
+rutabaga-auth-helper	git+https://git.launchpad.net/~launchpad/rutabaga/+git/charm-build-rutabaga-auth-helper;revno=build/6552986a9bb1a9a67cee8cc53f82e9a2e6aebe64
diff --git a/lp-builder-proxy/collect-payload b/lp-builder-proxy/collect-payload
new file mode 100644
index 0000000..96f1955
--- /dev/null
+++ b/lp-builder-proxy/collect-payload
@@ -0,0 +1,2 @@
+rutabaga-code-tip	git+https://git.launchpad.net/rutabaga
+rutabaga-dependencies	git+https://git.launchpad.net/~canonical-launchpad-branches/rutabaga/+git/dependencies
diff --git a/lp-builder-proxy/configs/custom-secgroups-production.yaml b/lp-builder-proxy/configs/custom-secgroups-production.yaml
new file mode 100644
index 0000000..c0e89c6
--- /dev/null
+++ b/lp-builder-proxy/configs/custom-secgroups-production.yaml
@@ -0,0 +1,13 @@
+applications:
+    squid-reverseproxy:
+        type: neutron
+        rules:
+            - rsync-logs
+    rutabaga:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
diff --git a/lp-builder-proxy/configs/custom-secgroups-qastaging.yaml b/lp-builder-proxy/configs/custom-secgroups-qastaging.yaml
new file mode 100644
index 0000000..ff5f38c
--- /dev/null
+++ b/lp-builder-proxy/configs/custom-secgroups-qastaging.yaml
@@ -0,0 +1,31 @@
+applications:
+    nrpe:
+        type: neutron
+        rules:
+            - nagios-monitored
+    telegraf:
+        type: neutron
+        rules:
+            - metrics
+    squid-reverseproxy:
+        type: neutron
+        rules:
+            - rsync-logs
+    rutabaga:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    nagios-monitored:
+        # Allow monitoring from devops-nagios.ps5.internal.
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "10.131.26.77/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "10.131.26.77/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "10.131.26.77/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "10.131.26.77/32"}
+    metrics:
+        # Allow prometheus on cyrano and mairu to scrape telegraf.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "91.189.95.24/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "91.189.94.60/32"}
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
diff --git a/lp-builder-proxy/manifest b/lp-builder-proxy/manifest
new file mode 120000
index 0000000..6893813
--- /dev/null
+++ b/lp-builder-proxy/manifest
@@ -0,0 +1 @@
+manifests/ci-run
\ No newline at end of file
diff --git a/lp-builder-proxy/manifest-perform-autodeploy b/lp-builder-proxy/manifest-perform-autodeploy
new file mode 120000
index 0000000..2290e8d
--- /dev/null
+++ b/lp-builder-proxy/manifest-perform-autodeploy
@@ -0,0 +1 @@
+manifests/update-code-asset
\ No newline at end of file
diff --git a/lp-builder-proxy/manifest-verify b/lp-builder-proxy/manifest-verify
new file mode 120000
index 0000000..6e02de4
--- /dev/null
+++ b/lp-builder-proxy/manifest-verify
@@ -0,0 +1 @@
+manifests/verify
\ No newline at end of file
diff --git a/lp-builder-proxy/manifests/ci-run b/lp-builder-proxy/manifests/ci-run
new file mode 100644
index 0000000..61980ee
--- /dev/null
+++ b/lp-builder-proxy/manifests/ci-run
@@ -0,0 +1,3 @@
+include config=manifests/initial-deployment
+include config=manifests/upgrade-deployment
+include config=manifests/update-code-asset
diff --git a/lp-builder-proxy/manifests/initial-deployment b/lp-builder-proxy/manifests/initial-deployment
new file mode 100644
index 0000000..8bd6466
--- /dev/null
+++ b/lp-builder-proxy/manifests/initial-deployment
@@ -0,0 +1,8 @@
+collect config=collect-payload
+script config=utils/make-branches BRANCH=staging
+collect config=collect-charms
+script config=predeploy
+bundle config=bundle.yaml local=deploy-secrets
+juju-check-wait
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-builder-proxy/manifests/rollback-code-asset b/lp-builder-proxy/manifests/rollback-code-asset
new file mode 100644
index 0000000..ad513ae
--- /dev/null
+++ b/lp-builder-proxy/manifests/rollback-code-asset
@@ -0,0 +1,9 @@
+# Check for juju status errors, but don't verify service health; something
+# is presumably wrong given that we're rolling back.
+juju-check-wait
+
+# Roll back the code asset, by default to the asset that we just updated
+# from, but with the option to override.
+script config=update-code-asset ROLLBACK=true
+
+include config=manifests/verify
diff --git a/lp-builder-proxy/manifests/secgroups b/lp-builder-proxy/manifests/secgroups
new file mode 100644
index 0000000..8c438ee
--- /dev/null
+++ b/lp-builder-proxy/manifests/secgroups
@@ -0,0 +1 @@
+script config=utils/custom-secgroups.py SKIP_STAGES=devel
diff --git a/lp-builder-proxy/manifests/update-code-asset b/lp-builder-proxy/manifests/update-code-asset
new file mode 100644
index 0000000..f3dd46c
--- /dev/null
+++ b/lp-builder-proxy/manifests/update-code-asset
@@ -0,0 +1,8 @@
+include config=manifests/verify
+
+# Update the code asset, by default to the latest successfully tested asset,
+# but with the option to override.
+collect config=collect-payload
+script config=update-code-asset
+
+include config=manifests/verify
diff --git a/lp-builder-proxy/manifests/upgrade-deployment b/lp-builder-proxy/manifests/upgrade-deployment
new file mode 100644
index 0000000..f99c890
--- /dev/null
+++ b/lp-builder-proxy/manifests/upgrade-deployment
@@ -0,0 +1,10 @@
+include config=manifests/verify
+collect config=collect-payload
+script config=utils/make-branches BRANCH=staging
+collect config=collect-charm-upgrades
+script config=predeploy
+script config=upgrade-charms
+bundle config=bundle.yaml delay=0
+juju-check-wait
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-builder-proxy/manifests/verify b/lp-builder-proxy/manifests/verify
new file mode 100644
index 0000000..f25f902
--- /dev/null
+++ b/lp-builder-proxy/manifests/verify
@@ -0,0 +1,4 @@
+juju-check-wait
+# It occasionally takes a little while for all the servers to start
+# accepting connections.
+verify retry=3
diff --git a/lp-builder-proxy/predeploy b/lp-builder-proxy/predeploy
new file mode 100755
index 0000000..2a46e6e
--- /dev/null
+++ b/lp-builder-proxy/predeploy
@@ -0,0 +1,47 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+create_admin_api_secret () {
+    local secret="$1"
+
+    [ ! -e "$secret" ] || return 0
+
+    # The admin API secret should normally remain constant across
+    # deployments, but when we need to rotate it, we just need to remove it
+    # from ${MOJO_LOCAL_DIR}/admin-api-secret and redeploy.
+    python3 -c '
+import random
+import string
+import sys
+
+rnd = random.SystemRandom()
+with open(sys.argv[1], "w") as f:
+    f.write("".join(
+        rnd.choice(string.ascii_uppercase + string.digits) for _ in range(32)))
+' "$secret"
+}
+
+create_admin_api_secret "${MOJO_LOCAL_DIR}/admin-api-secret"
+
+case "${MOJO_STAGE##*/}" in
+    devel)
+        branch=tip
+        ;;
+    *)
+        branch=staging
+        ;;
+esac
+
+CONTAINER_NAME="${MOJO_DOWNLOADER_CONTAINER_NAME:-${MOJO_PROJECT}-builds}"
+STORAGE_URL="$(${TOP}/utils/get-swift-storage-url)"
+RUTABAGA_BUILD_LABEL="$(
+    git -C "${MOJO_BUILD_DIR}/rutabaga-code-${branch}" rev-parse HEAD)"
+${TOP}/utils/set-local-config --bundle --default rutabaga rutabaga \
+    swift_container_name="${CONTAINER_NAME}" \
+    swift_storage_url="${STORAGE_URL}"
+${TOP}/utils/set-local-config --bundle rutabaga rutabaga \
+    build_label="${RUTABAGA_BUILD_LABEL}"
+
+exit 0
diff --git a/lp-builder-proxy/update-code-asset b/lp-builder-proxy/update-code-asset
new file mode 100755
index 0000000..e1c9105
--- /dev/null
+++ b/lp-builder-proxy/update-code-asset
@@ -0,0 +1,67 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os
+import subprocess
+import sys
+
+import requests
+
+from utils import utils
+
+
+container_name = os.environ.get(
+    'MOJO_DOWNLOADER_CONTAINER_NAME', os.environ['MOJO_PROJECT'] + '-builds')
+
+# Get the appropriate deployment artifact from swift.
+is_ci_run = 'CI_RUN' in os.environ
+if is_ci_run:
+    branch = 'tip'
+else:
+    branch = 'staging'
+
+combined_build_label = os.environ.get('BUILD_LABEL', '')
+if not combined_build_label:
+    labels = []
+    for payload_name in utils.payload_names():
+        revid = utils.get_code_branch_revision(payload_name, branch=branch)
+        labels.append('{}={}'.format(payload_name, revid))
+    combined_build_label = ':'.join(labels)
+if combined_build_label:
+    # Verify that the given builds exist on swift.
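+    # The combined label has the form "name1=revid1:name2=revid2" (see
+    # the join above), so split_build_label presumably returns a dict
+    # mapping payload names to revision IDs.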
+    build_labels = utils.split_build_label(combined_build_label)
+    storage_url = utils.get_swift_auth(anonymous=True)[0]
+    for payload_name, payload_label in build_labels.items():
+        object_url = '{}/{}/{}-builds-focal/{}/{}.tar.gz'.format(
+            storage_url, container_name, payload_name, payload_label,
+            payload_name)
+        response = requests.head(object_url)
+        if response.status_code != 200:
+            sys.stdout.write("Unable to fetch {} from Swift: {} {}".format(
+                object_url, response.status_code, response.reason))
+            sys.exit(1)
+else:
+    sys.stdout.write("Unable to find latest build label from Swift.")
+    sys.exit(1)
+
+juju_services = utils.juju_services()
+for payload_name, payload_label in build_labels.items():
+    current_revid = subprocess.check_output(
+        ['juju', 'config', 'rutabaga', 'build_label'],
+        universal_newlines=True).rstrip('\n')
+    if payload_label == current_revid:
+        sys.stdout.write(
+            "The current rutabaga revision ID ({}) already matches {}.\n"
+            "Skipping setting build_label.\n".format(
+                current_revid, payload_label))
+        continue
+
+    sys.stdout.write("Updating rutabaga code asset from {} to {}.\n".format(
+        current_revid, payload_label))
+    subprocess.check_call([
+        'juju', 'config', 'rutabaga',
+        'build_label={}'.format(payload_label)])
diff --git a/lp-builder-proxy/upgrade-charms b/lp-builder-proxy/upgrade-charms
new file mode 100755
index 0000000..2a73904
--- /dev/null
+++ b/lp-builder-proxy/upgrade-charms
@@ -0,0 +1,23 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os.path
+import subprocess
+
+from utils import utils
+
+
+juju_services = utils.juju_services()
+app_names = [
+    'rutabaga',
+    'rutabaga-auth-helper',
+    ]
+
+for app_name in app_names:
+    if app_name not in juju_services:
+        continue
+    subprocess.check_call([
+        'juju', 'upgrade-charm', app_name,
+        '--path=%s' % os.path.join(
+            os.environ['MOJO_REPO_DIR'], os.environ['MOJO_SERIES'], app_name)])
diff --git a/lp-builder-proxy/utils b/lp-builder-proxy/utils
new file mode 120000
index 0000000..468ba70
--- /dev/null
+++ b/lp-builder-proxy/utils
@@ -0,0 +1 @@
+../utils
\ No newline at end of file
diff --git a/lp-builder-proxy/verify b/lp-builder-proxy/verify
new file mode 100755
index 0000000..df9e6ef
--- /dev/null
+++ b/lp-builder-proxy/verify
@@ -0,0 +1,11 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
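+# Set EXTRA_SKIP_CHECKS, using '|' as the separator. e.g.
+# EXTRA_SKIP_CHECKS='check_ntpmon|check_hardware' mojo run -m manifest-verify
+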
+export EXTRA_SKIP_CHECKS="check_swap${EXTRA_SKIP_CHECKS:+|${EXTRA_SKIP_CHECKS}}"
+
+exec "$TOP/utils/verify"
diff --git a/lp-codeimport/README.md b/lp-codeimport/README.md
new file mode 100644
index 0000000..aa21f2c
--- /dev/null
+++ b/lp-codeimport/README.md
@@ -0,0 +1,42 @@
+# Launchpad code import worker
+
+This spec deploys the Launchpad code import worker.
+
+You can run it locally using Juju's LXD support and Mojo.  First, configure
+your environment to download payload builds from the output of our Jenkins
+jobs:
+
+    export MOJO_ROOT="$HOME/.local/share/mojo"
+    export MOJO_DOWNLOADER_STORAGE_URL=https://objectstorage.prodstack4-5.canonical.com/v1/AUTH_f32a0503483444aa8dd95cbdbca7846f
+    export MOJO_DOWNLOADER_CONTAINER_NAME=lp-codeimport-builds
+
+Then run the spec using Mojo:
+
+    mojo project-new -s xenial -c containerless mojo-lp-codeimport
+    mojo workspace-new -p mojo-lp-codeimport -s xenial \
+        --stage lp-codeimport/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+    mojo run -p mojo-lp-codeimport -s xenial --stage lp-codeimport/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+
+You must have python-requests, python-yaml, and python-swiftclient
+installed.
+
+This spec stores its code payload in Swift, so you must have suitable
+credentials in your environment.  The easiest way to handle this for
+development is to [deploy a local Swift instance
+first](https://www.chiark.greenend.org.uk/~cjwatson/blog/deploying-swift.html).
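+
+Once credentials are in place, you can sanity-check that a payload build
+is visible in Swift.  This is a sketch assuming the container layout used
+by update-code-asset, with a placeholder build label standing in for a
+real git revision ID:
+
+    import requests
+
+    storage_url = ("https://objectstorage.prodstack4-5.canonical.com"
+                   "/v1/AUTH_f32a0503483444aa8dd95cbdbca7846f")
+    label = "0123456789abcdef"  # hypothetical build label
+    url = ("{}/lp-codeimport-builds/lp-codeimport-code-tip-builds"
+           "/{}/lp-codeimport-code-tip.tar.gz").format(storage_url, label)
+    print(requests.head(url).status_code)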
diff --git a/lp-codeimport/collect-charm-upgrades b/lp-codeimport/collect-charm-upgrades
new file mode 100644
index 0000000..1097c29
--- /dev/null
+++ b/lp-codeimport/collect-charm-upgrades
@@ -0,0 +1,3 @@
+# Add any charm upgrades which are part of the current deployment.
+# This file can be empty when there are none.
+lp-codeimport			git+https://git.launchpad.net/~launchpad/lp-codeimport/+git/charm-build-lp-codeimport;revno=build/b10b2e0a2fdd331491ac63d65ffc7ca1313ec151
diff --git a/lp-codeimport/collect-charms b/lp-codeimport/collect-charms
new file mode 100644
index 0000000..7508b3f
--- /dev/null
+++ b/lp-codeimport/collect-charms
@@ -0,0 +1,5 @@
+# Services
+lp-codeimport			git+https://git.launchpad.net/~launchpad/lp-codeimport/+git/charm-build-lp-codeimport;revno=build/2f3897caeca15946db88c220b5880c9d210826f3
+
+# Subordinates
+nrpe				cs:nrpe-73
diff --git a/lp-codeimport/collect-payload b/lp-codeimport/collect-payload
new file mode 100644
index 0000000..15a9774
--- /dev/null
+++ b/lp-codeimport/collect-payload
@@ -0,0 +1,2 @@
+lp-codeimport-code-tip		git+https://git.launchpad.net/lp-codeimport
+lp-codeimport-dependencies	git+https://git.launchpad.net/~launchpad/lp-codeimport/+git/dependencies
diff --git a/lp-codeimport/configs/custom-secgroups-production.yaml b/lp-codeimport/configs/custom-secgroups-production.yaml
new file mode 100644
index 0000000..36796de
--- /dev/null
+++ b/lp-codeimport/configs/custom-secgroups-production.yaml
@@ -0,0 +1,19 @@
+applications:
+    nrpe:
+        type: neutron
+        rules:
+            - nagios-monitored
+    lp-codeimport:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    nagios-monitored:
+        # Allow monitoring from nagios.ps5.canonical.com.
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "185.125.188.164/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "185.125.188.164/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "185.125.188.164/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "185.125.188.164/32"}
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
diff --git a/lp-codeimport/configs/custom-secgroups-qastaging.yaml b/lp-codeimport/configs/custom-secgroups-qastaging.yaml
new file mode 100644
index 0000000..80bfa02
--- /dev/null
+++ b/lp-codeimport/configs/custom-secgroups-qastaging.yaml
@@ -0,0 +1,19 @@
+applications:
+    nrpe:
+        type: neutron
+        rules:
+            - nagios-monitored
+    lp-codeimport:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    nagios-monitored:
+        # Allow monitoring from wendigo.
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "91.189.90.53/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "91.189.90.53/32"}
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
diff --git a/lp-codeimport/configs/custom-secgroups-staging.yaml b/lp-codeimport/configs/custom-secgroups-staging.yaml
new file mode 100644
index 0000000..80bfa02
--- /dev/null
+++ b/lp-codeimport/configs/custom-secgroups-staging.yaml
@@ -0,0 +1,19 @@
+applications:
+    nrpe:
+        type: neutron
+        rules:
+            - nagios-monitored
+    lp-codeimport:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    nagios-monitored:
+        # Allow monitoring from wendigo.
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "91.189.90.53/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "91.189.90.53/32"}
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
diff --git a/lp-codeimport/manifest b/lp-codeimport/manifest
new file mode 120000
index 0000000..6893813
--- /dev/null
+++ b/lp-codeimport/manifest
@@ -0,0 +1 @@
+manifests/ci-run
\ No newline at end of file
diff --git a/lp-codeimport/manifest-perform-autodeploy b/lp-codeimport/manifest-perform-autodeploy
new file mode 120000
index 0000000..2290e8d
--- /dev/null
+++ b/lp-codeimport/manifest-perform-autodeploy
@@ -0,0 +1 @@
+manifests/update-code-asset
\ No newline at end of file
diff --git a/lp-codeimport/manifest-verify b/lp-codeimport/manifest-verify
new file mode 120000
index 0000000..6e02de4
--- /dev/null
+++ b/lp-codeimport/manifest-verify
@@ -0,0 +1 @@
+manifests/verify
\ No newline at end of file
diff --git a/lp-codeimport/manifests/ci-run b/lp-codeimport/manifests/ci-run
new file mode 100644
index 0000000..e8f6454
--- /dev/null
+++ b/lp-codeimport/manifests/ci-run
@@ -0,0 +1,3 @@
+include config=manifests/initial-deployment
+include config=manifests/upgrade-deployment
+include config=manifests/update-code-asset-ci-run
diff --git a/lp-codeimport/manifests/initial-deployment b/lp-codeimport/manifests/initial-deployment
new file mode 100644
index 0000000..60ef68b
--- /dev/null
+++ b/lp-codeimport/manifests/initial-deployment
@@ -0,0 +1,8 @@
+collect config=collect-payload
+script config=utils/make-branches BRANCH=staging
+collect config=collect-charms
+script config=predeploy
+deploy config=services delay=0 local=deploy-secrets max-wait=900
+juju-check-wait
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-codeimport/manifests/rollback-code-asset b/lp-codeimport/manifests/rollback-code-asset
new file mode 100644
index 0000000..ad513ae
--- /dev/null
+++ b/lp-codeimport/manifests/rollback-code-asset
@@ -0,0 +1,9 @@
+# Check for juju status errors, but don't verify service health; something
+# is presumably wrong given that we're rolling back.
+juju-check-wait
+
+# Roll back the code asset, by default to the asset that we just updated
+# from, but with the option to override.
+script config=update-code-asset ROLLBACK=true
+
+include config=manifests/verify
diff --git a/lp-codeimport/manifests/secgroups b/lp-codeimport/manifests/secgroups
new file mode 100644
index 0000000..8c438ee
--- /dev/null
+++ b/lp-codeimport/manifests/secgroups
@@ -0,0 +1 @@
+script config=utils/custom-secgroups.py SKIP_STAGES=devel
diff --git a/lp-codeimport/manifests/update-code-asset b/lp-codeimport/manifests/update-code-asset
new file mode 100644
index 0000000..f3dd46c
--- /dev/null
+++ b/lp-codeimport/manifests/update-code-asset
@@ -0,0 +1,8 @@
+include config=manifests/verify
+
+# Update the code asset, by default to the latest successfully tested asset,
+# but with the option to override.
+collect config=collect-payload
+script config=update-code-asset
+
+include config=manifests/verify
diff --git a/lp-codeimport/manifests/update-code-asset-ci-run b/lp-codeimport/manifests/update-code-asset-ci-run
new file mode 100644
index 0000000..5d54629
--- /dev/null
+++ b/lp-codeimport/manifests/update-code-asset-ci-run
@@ -0,0 +1,4 @@
+juju-check-wait
+script config=update-code-asset CI_RUN=true
+juju-check-wait
+include config=manifests/verify
diff --git a/lp-codeimport/manifests/upgrade-deployment b/lp-codeimport/manifests/upgrade-deployment
new file mode 100644
index 0000000..c760c6a
--- /dev/null
+++ b/lp-codeimport/manifests/upgrade-deployment
@@ -0,0 +1,12 @@
+include config=manifests/verify
+collect config=collect-payload
+script config=utils/make-branches BRANCH=staging
+collect config=collect-charms
+collect config=collect-charm-upgrades
+script config=predeploy
+script config=upgrade-charms
+script config=utils/set-service-options
+deploy config=services delay=0 local=deploy-secrets
+juju-check-wait
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-codeimport/manifests/verify b/lp-codeimport/manifests/verify
new file mode 100644
index 0000000..9eba6e4
--- /dev/null
+++ b/lp-codeimport/manifests/verify
@@ -0,0 +1,4 @@
+juju-check-wait
+# It occasionally takes a little while for all the servers to start
+# accepting connections.
+verify retry=5
diff --git a/lp-codeimport/predeploy b/lp-codeimport/predeploy
new file mode 100755
index 0000000..9f3b975
--- /dev/null
+++ b/lp-codeimport/predeploy
@@ -0,0 +1,60 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+create_ssh_key () {
+    local key="$1"
+
+    if [ ! -e "$key" ]; then
+        echo "Generating SSH key for testing..."
+        ssh-keygen -t rsa -b 2048 -f "$key" -N ''
+    fi
+}
+
+create_gpg_key () {
+    local key_prefix="$1"
+
+    if [ ! -e "$key_prefix.sec" ]; then
+        local tmp
+        tmp="$(mktemp -d)"
+        cat >"$tmp/parameters" <<EOF
+%no-protection
+Key-Type: RSA
+Key-Length: 2048
+Key-Usage: sign
+Name-Real: VCS Import Daemon
+Name-Email: vcs-imports@xxxxxxxxxxxxxx
+Expire-Date: 0
+%commit
+EOF
+        gpg --homedir "$tmp" --batch --generate-key "$tmp/parameters"
+        gpg --homedir "$tmp" --export-secret-key --armor >"$key_prefix.sec"
+        gpg --homedir "$tmp" --export --armor >"$key_prefix.pub"
+        rm -rf "$tmp"
+    fi
+}
+
+case "${MOJO_STAGE##*/}" in
+    devel)
+        branch=tip
+        ;;
+    *)
+        branch=staging
+        ;;
+esac
+
+CONTAINER_NAME="${MOJO_DOWNLOADER_CONTAINER_NAME:-${MOJO_PROJECT}-builds}"
+STORAGE_URL="$(${TOP}/utils/get-swift-storage-url)"
+LP_CODEIMPORT_BUILD_LABEL="$(
+    git -C "${MOJO_BUILD_DIR}/lp-codeimport-code-${branch}" rev-parse HEAD)"
+${TOP}/utils/set-local-config --default lp-codeimport lp-codeimport \
+    swift_container_name="${CONTAINER_NAME}" \
+    swift_storage_url="${STORAGE_URL}"
+${TOP}/utils/set-local-config lp-codeimport lp-codeimport \
+    build_label="${LP_CODEIMPORT_BUILD_LABEL}"
+
+create_ssh_key "${MOJO_LOCAL_DIR}/ssh-key"
+create_gpg_key "${MOJO_LOCAL_DIR}/gpg-key"
+
+exit 0
diff --git a/lp-codeimport/services b/lp-codeimport/services
new file mode 100644
index 0000000..3ea49c3
--- /dev/null
+++ b/lp-codeimport/services
@@ -0,0 +1,114 @@
+{%- if stage_name == "production" %}
+{%-   set bazaar_branch_store = "sftp://hoover@xxxxxxxxxxxxxxxxxxxxxx.internal/srv/importd/www/" %}
+{%-   set bzr_identity = "VCS Import Daemon <vcs-imports@xxxxxxxxxxxxx>" %}
+{%-   set codeimport_constraints = "cores=2 mem=4096M root-disk-source=volume root-disk=60G" %}
+{%-   set codeimport_scale = 4 %}
+{%-   set devel = False %}
+{%-   set error_email = "launchpad-error-reports@xxxxxxxxxxxxxxxxxxx" %}
+{%-   set foreign_tree_store = "sftp://hoover@xxxxxxxxxxxxxxxxxxxxxx.internal/srv/importd/sources/" %}
+{%-   set git_hostname = "git.launchpad.net" %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set nagios_context = "lp-prodstack-codeimport" %}
+{%-   set nagios_hostgroups = "prodstack-lp" %}
+{%-   set nagios_master = "nagios.ps5.internal" %}
+{%-   set nagios_servicegroups = "prompt-critical" %}
+{%-   set oops_prefix = "PRODUCTION" %}
+{%-   set rabbitmq_host = "ackee.canonical.com:5672" %}
+{%-   set rabbitmq_user = "production" %}
+{%-   set rabbitmq_virtual_host = "launchpad.net" %}
+{%-   set scheduler_endpoint = "http://xmlrpc.lp.internal:8097/codeimportscheduler" %}
+{%- elif stage_name == "staging" %}
+{%-   set bazaar_branch_store = "sftp://supermirror@xxxxxxxxxxxxxxxxxxxxxxx:922/home/supermirror/importd-push-branches" %}
+{%-   set bzr_identity = "VCS Import Daemon <vcs-imports@xxxxxxxxxxxxxxxxxxxxx>" %}
+{%-   set codeimport_constraints = "cores=2 mem=4096M" %}
+{%-   set codeimport_scale = 2 %}
+{%-   set devel = False %}
+{%-   set error_email = "launchpad-error-reports@xxxxxxxxxxxxxxxxxxx" %}
+{%-   set foreign_tree_store = "sftp://supermirror@xxxxxxxxxxxxxxxxxxxxxxx:922/home/supermirror/foreign-trees" %}
+{%-   set git_hostname = "git.staging.paddev.net" %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set nagios_context = "lp-stagingstack-codeimport" %}
+{%-   set nagios_hostgroups = "stagingstack-lp" %}
+{%-   set nagios_master = "wendigo.canonical.com" %}
+{%-   set oops_prefix = "STAGING" %}
+{%-   set rabbitmq_host = "rabbitmq.staging.lp.internal:5672" %}
+{%-   set rabbitmq_user = "staging" %}
+{%-   set rabbitmq_virtual_host = "staging.launchpad.net" %}
+{%-   set scheduler_endpoint = "http://xmlrpc.staging.lp.internal:8097/codeimportscheduler" %}
+{%- elif stage_name == "qastaging" %}
+{%-   set bazaar_branch_store = "sftp://supermirror@xxxxxxxxxxxxxxxxxxxxxxx:922/home/supermirror/importd-push-branches" %}
+{%-   set bzr_identity = "VCS Import Daemon <vcs-imports@xxxxxxxxxxxxxxxxxxxxxxx>" %}
+{%-   set codeimport_constraints = "cores=2 mem=4096M" %}
+{%-   set codeimport_scale = 2 %}
+{%-   set devel = False %}
+{%-   set error_email = "launchpad-error-reports@xxxxxxxxxxxxxxxxxxx" %}
+{%-   set foreign_tree_store = "sftp://supermirror@xxxxxxxxxxxxxxxxxxxxxxx:922/home/supermirror/foreign-trees" %}
+{%-   set git_hostname = "git.qastaging.paddev.net" %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set nagios_context = "lp-stagingstack-codeimport" %}
+{%-   set nagios_hostgroups = "stagingstack-lp" %}
+{%-   set nagios_master = "wendigo.canonical.com" %}
+{%-   set oops_prefix = "QASTAGING" %}
+{%-   set rabbitmq_host = "rabbitmq.qastaging.lp.internal:5672" %}
+{%-   set rabbitmq_user = "qastaging" %}
+{%-   set rabbitmq_virtual_host = "qastaging.launchpad.net" %}
+{%-   set scheduler_endpoint = "http://xmlrpc.qastaging.lp.internal:9097/codeimportscheduler" %}
+{%- else %}
+{%-   set bazaar_branch_store = "sftp://hoover@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/srv/importd/www/" %}
+{%-   set bzr_identity = "VCS Import Daemon <vcs-imports@xxxxxxxxxxxxxx>" %}
+{%-   set codeimport_scale = 1 %}
+{%-   set devel = True %}
+{%-   set foreign_tree_store = "sftp://hoover@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/srv/importd/sources/" %}
+{%-   set git_hostname = "git.launchpad.test" %}
+{%-   set log_hosts_allow = "" %}
+{%-   set nagios_context = "lp-devel-codeimport" %}
+{%-   set nagios_hostgroups = "devel-lp" %}
+{#-   The configured nagios_master doesn't have to be real, but it does have
+      to resolve. #}
+{%-   set nagios_master = "localhost" %}
+{%-   set oops_prefix = "DEVEL" %}
+{%-   set scheduler_endpoint = "http://xmlrpc-private.launchpad.test:8087/codeimportscheduler" %}
+{%- endif -%}
+lp-codeimport:
+  series: "{{ series }}"
+  services:
+    lp-codeimport:
+      charm: lp-codeimport
+      constraints: "{{ codeimport_constraints }}"
+      num_units: {{ codeimport_scale }}
+      options:
+        bazaar_branch_store: "{{ bazaar_branch_store }}"
+{%- if error_email %}
+        error_email: "{{ error_email }}"
+{%- endif %}
+        foreign_tree_store: "{{ foreign_tree_store }}"
+{%- if devel %}
+        git_certificate: include-base64://{{ local_dir }}/{{ git_hostname }}.crt
+{%- endif %}
+        git_hostname: "{{ git_hostname }}"
+        log_hosts_allow: "{{ log_hosts_allow }}"
+        nagios_context: "{{ nagios_context }}"
+{%- if nagios_servicegroups %}
+        nagios_servicegroups: "{{ nagios_servicegroups }},{{ nagios_context }}"
+{%- endif %}
+        oops_prefix: "{{ oops_prefix }}"
+        private_gpg_key: include-base64://{{ local_dir }}/gpg-key.sec
+        public_gpg_key: include-base64://{{ local_dir }}/gpg-key.pub
+        private_ssh_key: include-base64://{{ local_dir }}/ssh-key
+        public_ssh_key: include-base64://{{ local_dir }}/ssh-key.pub
+{%- if rabbitmq_host %}
+        rabbitmq_host: "{{ rabbitmq_host }}"
+        rabbitmq_password: include-file://{{ local_dir }}/rabbitmq-password
+        rabbitmq_user: "{{ rabbitmq_user }}"
+        rabbitmq_virtual_host: "{{ rabbitmq_virtual_host }}"
+{%- endif %}
+        scheduler_endpoint: "{{ scheduler_endpoint }}"
+    nrpe:
+      charm: nrpe
+      options:
+        hostgroups: "{{ nagios_hostgroups }}"
+        nagios_host_context: "{{ nagios_context }}"
+        export_nagios_definitions: true
+        nagios_master: "{{ nagios_master }}"
+  relations:
+    - ["nrpe", "lp-codeimport"]
diff --git a/lp-codeimport/update-code-asset b/lp-codeimport/update-code-asset
new file mode 100755
index 0000000..e7386e3
--- /dev/null
+++ b/lp-codeimport/update-code-asset
@@ -0,0 +1,65 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os
+import subprocess
+import sys
+
+import requests
+
+from utils import utils
+
+
+container_name = os.environ.get(
+    'MOJO_DOWNLOADER_CONTAINER_NAME', os.environ['MOJO_PROJECT'] + '-builds')
+
+# Get the appropriate deployment artifact from swift.
+is_ci_run = 'CI_RUN' in os.environ
+if is_ci_run:
+    branch = 'tip'
+else:
+    branch = 'staging'
+
+combined_build_label = os.environ.get('BUILD_LABEL', '')
+if not combined_build_label:
+    labels = []
+    for payload_name in utils.payload_names():
+        revid = utils.get_code_branch_revision(payload_name, branch=branch)
+        labels.append('{}={}'.format(payload_name, revid))
+    combined_build_label = ':'.join(labels)
+if combined_build_label:
+    # Verify that the given builds exist on swift.
+    build_labels = utils.split_build_label(combined_build_label)
+    storage_url = utils.get_swift_auth(anonymous=True)[0]
+    for payload_name, payload_label in build_labels.items():
+        object_url = '{}/{}/{}-builds/{}/{}.tar.gz'.format(
+            storage_url, container_name, payload_name, payload_label,
+            payload_name)
+        response = requests.head(object_url)
+        if response.status_code != 200:
+            sys.stdout.write("Unable to fetch {} from Swift: {} {}".format(
+                object_url, response.status_code, response.reason))
+            sys.exit(1)
+else:
+    sys.stdout.write("Unable to find latest build label from swift.")
+    sys.exit(1)
+
+juju_services = utils.juju_services()
+for payload_name, payload_label in build_labels.items():
+    current_revid = subprocess.check_output(
+        ['juju', 'config', 'lp-codeimport', 'build_label'],
+        universal_newlines=True).rstrip('\n')
+    if payload_label == current_revid:
+        sys.stdout.write(
+            "The current lp-codeimport revision ID ({}) already matches {}.\n"
+            "Skipping setting build_label.\n".format(
+                current_revid, payload_label))
+        continue
+
+    sys.stdout.write(
+        "Updating lp-codeimport code asset from {} to {}.\n".format(
+            current_revid, payload_label))
+    subprocess.check_call([
+        'juju', 'config', 'lp-codeimport',
+        'build_label={}'.format(payload_label)])
diff --git a/lp-codeimport/upgrade-charms b/lp-codeimport/upgrade-charms
new file mode 100755
index 0000000..966737c
--- /dev/null
+++ b/lp-codeimport/upgrade-charms
@@ -0,0 +1,23 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os.path
+import subprocess
+
+from utils import utils
+
+
+juju_services = utils.juju_services()
+app_names = [
+    'lp-codeimport',
+    'nrpe',
+    ]
+
+for app_name in app_names:
+    if app_name not in juju_services:
+        continue
+    subprocess.check_call([
+        'juju', 'upgrade-charm', app_name,
+        '--path=%s' % os.path.join(
+            os.environ['MOJO_REPO_DIR'], os.environ['MOJO_SERIES'], app_name)])
diff --git a/lp-codeimport/utils b/lp-codeimport/utils
new file mode 120000
index 0000000..468ba70
--- /dev/null
+++ b/lp-codeimport/utils
@@ -0,0 +1 @@
+../utils
\ No newline at end of file
diff --git a/lp-codeimport/verify b/lp-codeimport/verify
new file mode 100755
index 0000000..07173e1
--- /dev/null
+++ b/lp-codeimport/verify
@@ -0,0 +1,11 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+# Set EXTRA_SKIP_CHECKS, using '|' as the separator. e.g.
+# EXTRA_SKIP_CHECKS='check_ntpmon|check_hardware' mojo run -m manifest-verify
+
+export EXTRA_SKIP_CHECKS="check_swap${EXTRA_SKIP_CHECKS:+|${EXTRA_SKIP_CHECKS}}"
+
+exec "$TOP/utils/verify"
diff --git a/lp-git/README.md b/lp-git/README.md
new file mode 100644
index 0000000..e90c45d
--- /dev/null
+++ b/lp-git/README.md
@@ -0,0 +1,54 @@
+# Launchpad Git hosting service
+
+This spec deploys the Launchpad Git hosting service.
+
+You can run it locally using Juju's LXD support.  First, you need to make
+sure that the relevant LXD profile supports NFS mounts:
+
+    ./make-lxd-profile
+
+Then configure your environment to download payload builds from the output
+of our Jenkins jobs:
+
+    export MOJO_ROOT="$HOME/.local/share/mojo"
+    export MOJO_DOWNLOADER_STORAGE_URL=https://objectstorage.prodstack4-5.canonical.com/v1/AUTH_539ac956fe03418fb6c184a99e156d85
+
+Then run the spec using Mojo:
+
+    mojo project-new -s bionic -c containerless mojo-lp-git
+    mojo workspace-new -p mojo-lp-git -s bionic --stage lp-git/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+    mojo run -p mojo-lp-git -s bionic --stage lp-git/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+
+You must have python-requests and python-yaml installed.
+
+
+## Testing mojo spec changes locally
+
+If you have changed the mojo specs and would like to test them locally,
+make sure you have Juju installed and a model available.
+
+If you don't have one, add it with `juju add-model git`.
+
+After that, go to the root directory of this repository and run:
+
+    mojo project-new -s bionic -c containerless mojo-lp-git
+    mojo workspace-new -p mojo-lp-git -s bionic --stage lp-git/devel . devel
+    mojo run -p mojo-lp-git -s bionic --stage lp-git/devel . devel
+
+
+## Testing charm changes locally
+
+If, instead of the mojo spec itself, you have changed some Turnip charms
+and would like to test them with the mojo spec, build the charms in the
+Turnip project, then replace the charm path in the `collect-charms` file
+with the path to your `dist` directory, as in the following example:
+
+```
+(...)
+turnip-api                      git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-api;revno=build/135026bddc989b33a174d2fbcfa397d19b2f68a5
+turnip-celery                   /my/home/directory/launchpad/turnip/charm/dist/builds/turnip-celery
+```
+
+Then follow the instructions in _"Testing mojo spec changes locally"_ above.
diff --git a/lp-git/bundle.yaml b/lp-git/bundle.yaml
new file mode 100644
index 0000000..6d6f1d7
--- /dev/null
+++ b/lp-git/bundle.yaml
@@ -0,0 +1,407 @@
+{%- if stage_name in ("production", "production-ps5") %}
+{%-   if stage_name == "production-ps5" %}
+{%-     set git_constraints = "cores=8 mem=32768M root-disk-source=volume" %}
+{%-     set haproxy_constraints = "cores=2 mem=2048M root-disk-source=volume" %}
+{%-     set nfs_constraints = "cores=4 mem=65536M root-disk-source=volume" %}
+{%-     set rabbitmq_constraints = "cores=2 mem=2048M root-disk-source=volume" %}
+{%-   else %}
+{%-     set git_constraints = "cores=8 mem=32768M" %}
+{%-     set haproxy_constraints = "cores=2 mem=2048M" %}
+{%-     set nfs_constraints = "cores=4 mem=65536M" %}
+{%-     set rabbitmq_constraints = "cores=2 mem=2048M" %}
+{%-   endif %}
+{%-   set authentication_endpoint = "http://xmlrpc.lp.internal:8097/authserver" %}
+{%-   set devel = False %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set main_site_root = "https://launchpad.net/" %}
+{%-   set monitoring_allowed_cidr = "127.0.0.1/32 10.172.0.0/16" %}
+{%-   set nagios_context = "lp-prodstack-git" %}
+{%-   set nagios_e2e_urls_git = "git://git.launchpad.net/launchpad" %}
+{%-   set nagios_e2e_urls_http = "https://git.launchpad.net/launchpad" %}
+{%-   set nagios_hostgroups = "prodstack-lp" %}
+{%-   if stage_name == "production-ps5" %}
+{%-     set nagios_master = "nagios.ps5.internal" %}
+{%-   else %}
+{%-     set nagios_master = "wekufe.canonical.com" %}
+{%-   endif %}
+{%-   set openid_provider_root = "https://login.launchpad.net/" %}
+{%-   set site_name = "git.launchpad.net" %}
+{%-   set stage_type = "production" %}
+{%-   set statsd_environment = "production" %}
+{%-   set turku_environment_name = "prodstack-lp-git" %}
+{%-   set virtinfo_endpoint = "http://xmlrpc.lp.internal:8097/git" %}
+{%- elif stage_name in ("qastaging", "qastaging-ps5") %}
+{%-   if stage_name == "qastaging-ps5" %}
+{%-     set git_constraints = "cores=2 mem=8192M root-disk=50G root-disk-source=volume" %}
+{%-     set haproxy_constraints = "cores=2 mem=2048M root-disk-source=volume" %}
+{%-     set nfs_constraints = "cores=2 mem=8192M root-disk-source=volume" %}
+{%-     set rabbitmq_constraints = "cores=2 mem=2048M root-disk-source=volume" %}
+{%-   else %}
+{%-     set git_constraints = "cores=2 mem=8192M root-disk=50G" %}
+{%-     set haproxy_constraints = "cores=2 mem=2048M" %}
+{%-     set nfs_constraints = "cores=2 mem=2048M" %}
+{%-     set rabbitmq_constraints = "cores=2 mem=2048M" %}
+{%-   endif %}
+{%-   set devel = False %}
+{%-   set authentication_endpoint = "http://xmlrpc.qastaging.lp.internal:9097/authserver" %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set main_site_root = "https://qastaging.launchpad.net/" %}
+{%-   set monitoring_allowed_cidr = "127.0.0.1/32 10.172.0.0/16" %}
+{%-   set nagios_context = "lp-stagingstack-git" %}
+{%-   set nagios_e2e_urls_git = "git://git.qastaging.paddev.net/launchpad" %}
+{%-   set nagios_e2e_urls_http = "https://git.qastaging.paddev.net/launchpad" %}
+{%-   set nagios_hostgroups = "stagingstack-lp" %}
+{%-   if stage_name == "qastaging-ps5" %}
+{%-     set nagios_master = "devops-nagios.ps5.internal" %}
+{%-   else %}
+{%-     set nagios_master = "wendigo.canonical.com" %}
+{%-   endif %}
+{%-   set openid_provider_root = "https://login.launchpad.net/" %}
+{%-   set site_name = "git.qastaging.paddev.net" %}
+{%-   set stage_type = "qastaging" %}
+{%-   set statsd_environment = "qastaging" %}
+{%-   set turku_environment_name = "stagingstack-lp-git" %}
+{%-   set virtinfo_endpoint = "http://xmlrpc.qastaging.lp.internal:9097/git" %}
+{%- else %}
+{%-   set authentication_endpoint = "http://xmlrpc-private.launchpad.test:8087/authserver"; %}
+{%-   set devel = True %}
+{%-   set log_hosts_allow = "" %}
+{%-   set main_site_root = "https://launchpad.test/"; %}
+{%-   set monitoring_allowed_cidr = "127.0.0.1/32" %}
+{%-   set nagios_context = "lp-devel-git" %}
+{%-   set nagios_e2e_urls_git = "" %}
+{%-   set nagios_e2e_urls_http = "" %}
+{%-   set nagios_hostgroups = "devel-lp" %}
+{#-   The configured nagios_master doesn't have to be real, but it does have
+      to resolve. #}
+{%-   set nagios_master = "localhost" %}
+{%-   set openid_provider_root = "https://testopenid.test/" %}
+{%-   set site_name = "git.launchpad.test" %}
+{%-   set stage_type = "devel" %}
+{%-   set statsd_environment = "local" %}
+{%-   set ssl_cert_path = "git.launchpad.test.crt" %}
+{%-   set ssl_key_path = "git.launchpad.test.key" %}
+{#-   This is a bit confusing given that we also deploy turnipcake, but we
+      don't currently have a way to set the correct virtinfo_endpoint for
+      the latter, and in practice we usually want to test against Launchpad
+      anyway. #}
+{%-   set virtinfo_endpoint = "http://xmlrpc-private.launchpad.test:8087/git" %}
+{%- endif -%}
+series: "{{ series }}"
+applications:
+  haproxy:
+    charm: {{ charm_dir }}/haproxy
+    constraints: "{{ haproxy_constraints }}"
+{%- if devel %}
+    num_units: 1
+{%- else %}
+    num_units: 2
+{%- endif %}
+{%- if stage_name == "production" %}
+    to: ['12', '14']
+{%- endif %}
+    # Don't expose!  Not all of the ports are public.
+    options:
+      default_options: ""
+      default_timeouts: "queue 20000, client 3600000, connect 5000, server 3600000"
+      enable_monitoring: True
+      global_default_dh_param: 2048
+      monitoring_allowed_cidr: "{{ monitoring_allowed_cidr }}"
+      monitoring_stats_refresh: 3600
+      nagios_context: "{{ nagios_context }}"
+      peering_mode: active-active
+      services: |
+        - service_name: turnip-pack-frontend-https
+          service_host: 0.0.0.0
+          service_port: 443
+          service_options:
+            - default_backend turnip-pack-frontend-http
+            - option forwardfor
+            - acl unrestricted_networks src 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 91.189.88.0/21 162.213.32.0/22 185.125.188.0/22 194.169.254.0/24 "2001:67c:1360::/48" "2001:67c:1560::/46" "2620:2d:4000::/44" "2a06:bc80::/29"
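+            # Limit /ubuntu-cve-tracker/ for clients outside
+            # unrestricted_networks: concurrent connections are tracked per
+            # path in a stick-table and denied with 503 at 10 or more.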
+            - acl global_connlimit path_beg /ubuntu-cve-tracker/
+            - <FE> stick-table type string size 100 expire 15m store conn_cur
+            - <FE> http-request track-sc0 path if global_connlimit !unrestricted_networks
+            - <FE> http-request deny deny_status 503 if global_connlimit { sc0_conn_cur ge 10 } !unrestricted_networks
+            - maxconn 250
+{%- if not devel %}
+            - 'rspadd Strict-Transport-Security:\ max-age=15768000'
+{%- endif %}
+          crts: [DEFAULT]
+        - service_name: turnip-pack-frontend-http-redirect
+          service_host: 0.0.0.0
+          service_port: 80
+          service_options:
+            - mode http
+            - option httplog
+            - redirect scheme https code 301 if !{ ssl_fc }
+        - service_name: turnip-pack-frontend-http
+          service_host: 0.0.0.0
+          service_port: 9419
+          service_options: []
+{%- if devel %}
+      ssl_cert: include-base64://{{ local_dir }}/{{ ssl_cert_path }}
+      ssl_key: include-base64://{{ local_dir }}/{{ ssl_key_path }}
+{%- else %}
+      ssl_cert: DEFAULT
+      ssl_key: ""
+{%- endif %}
+      sysctl: |
+        net.ipv4.tcp_congestion_control: bbr
+{%- if not devel %}
+  autocert-haproxy:
+    charm: cs:~autocert-charmers/autocert-17
+    options:
+      autocert_host: autocert.canonical.com
+      cert_additional_names: "{{ site_name }}=default"
+      cert_auth_pairs: include-file://{{ local_dir }}/autocert-auth-pairs
+      dir_certs: /var/lib/haproxy
+      dir_keys: /var/lib/haproxy
+      service_action: reload
+      service_name: haproxy
+      suffix_cert: ".pem"
+      suffix_chain: ".pem"
+      suffix_key: ".pem"
+{%- endif %}
+  nfs-ganesha:
+    charm: cs:nfs-ganesha-5
+    constraints: "{{ nfs_constraints }}"
+{%- if stage_type == "production" %}
+    num_units: 2
+{%- else %}
+    num_units: 1
+{%- endif %}
+{%- if stage_name == "production" %}
+    to: ['13', '15']
+{%- endif %}
+    options:
+      install_keys: |
+        - null
+      install_sources: |
+{%- if stage_type == "production" %}
+        - ppa:nfs-ganesha-charmers/ubuntu/backports
+{%- else %}
+        - ppa:nfs-ganesha-charmers/ubuntu/backports-staging
+{%- endif %}
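+      # lookupcache=positive leaves negative lookups uncached, so
+      # repositories created via one unit show up promptly on the others.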
+      mount_options: "nfsvers=4.2,lookupcache=positive"
+      nagios_context: "{{ nagios_context }}"
+  turnip-pack-backend:
+    charm: {{ charm_dir }}/turnip-pack-backend
+    constraints: "{{ git_constraints }}"
+{%- if devel %}
+    num_units: 1
+{%- else %}
+    num_units: 2
+{%- endif %}
+{%- if stage_name == "production" %}
+    to: [9, 11]
+{%- endif %}
+    options:
+      haproxy_server_options: "check maxconn 70"
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      nfs: true
+      statsd_prefix: "{{ statsd_prefix }}"
+      statsd_environment: "{{ statsd_environment }}"
+      virtinfo_endpoint: "{{ virtinfo_endpoint }}"
+  turnip-pack-virt:
+    charm: {{ charm_dir }}/turnip-pack-virt
+{%- if devel %}
+    num_units: 1
+    to: [turnip-pack-backend]
+{%- elif stage_name == "production" %}
+    num_units: 2
+    to: [9, 11]
+{%- else %}
+    num_units: 2
+    to: [turnip-pack-backend, turnip-pack-backend]
+{%- endif %}
+    options:
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      virtinfo_endpoint: "{{ virtinfo_endpoint }}"
+  turnip-pack-frontend-git:
+    charm: {{ charm_dir }}/turnip-pack-frontend-git
+{%- if devel %}
+    num_units: 1
+    to: [turnip-pack-backend]
+{%- elif stage_name == "production" %}
+    num_units: 2
+    to: [9, 11]
+{%- else %}
+    num_units: 2
+    to: [turnip-pack-backend, turnip-pack-backend]
+{%- endif %}
+    options:
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      nagios_e2e_urls: "{{ nagios_e2e_urls_git }}"
+  turnip-pack-frontend-ssh:
+    charm: {{ charm_dir }}/turnip-pack-frontend-ssh
+{%- if devel %}
+    num_units: 1
+    to: [turnip-pack-backend]
+{%- elif stage_name == "production" %}
+    num_units: 2
+    to: [9, 11]
+{%- else %}
+    num_units: 2
+    to: [turnip-pack-backend, turnip-pack-backend]
+{%- endif %}
+    options:
+      authentication_endpoint: "{{ authentication_endpoint }}"
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      private_ssh_key: include-base64://{{ local_dir }}/ssh-host-key
+      public_ssh_key: include-base64://{{ local_dir }}/ssh-host-key.pub
+  turnip-pack-frontend-http:
+    charm: {{ charm_dir }}/turnip-pack-frontend-http
+    constraints: "{{ git_constraints }}"
+{%- if devel %}
+    num_units: 1
+    to: [turnip-pack-backend]
+{%- elif stage_name == "production" %}
+    num_units: 2
+    to: [9, 11]
+{%- else %}
+    num_units: 2
+    to: [turnip-pack-backend, turnip-pack-backend]
+{%- endif %}
+    options:
+      cgit_secret: include-base64://{{ local_dir }}/cgit-secret
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      main_site_root: "{{ main_site_root }}"
+      nagios_context: "{{ nagios_context }}"
+      nagios_e2e_urls: "{{ nagios_e2e_urls_http }}"
+      nfs: true
+      openid_provider_root: "{{ openid_provider_root }}"
+      site_name: "{{ site_name }}"
+      virtinfo_endpoint: "{{ virtinfo_endpoint }}"
+  turnip-api:
+    charm: {{ charm_dir }}/turnip-api
+    constraints: "{{ git_constraints }}"
+{%- if devel %}
+    num_units: 1
+    to: [turnip-pack-backend]
+{%- elif stage_name == "production" %}
+    num_units: 2
+    to: [9, 11]
+{%- else %}
+    num_units: 2
+    to: [turnip-pack-backend, turnip-pack-backend]
+{%- endif %}
+    options:
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      nfs: true
+  turnip-celery:
+    charm: {{ charm_dir }}/turnip-celery
+    constraints: "{{ git_constraints }}"
+{%- if devel %}
+    num_units: 1
+{%- else %}
+    num_units: 2
+{%- endif %}
+{#- On PS5, we deploy this to its own machine. #}
+{%- if devel %}
+    to: [turnip-pack-backend]
+{%- elif stage_name == "production" %}
+    to: [9, 11]
+{%- elif stage_name == "qastaging" %}
+    to: [turnip-pack-backend, turnip-pack-backend]
+{%- endif %}
+    options:
+      log_hosts_allow: "{{ log_hosts_allow }}"
+      nagios_context: "{{ nagios_context }}"
+      nfs: true
+      virtinfo_endpoint: "{{ virtinfo_endpoint }}"
+{%- if devel %}
+  turnipcake:
+    charm: {{ charm_dir }}/turnipcake
+    num_units: 1
+{%- endif %}
+  nrpe:
+    charm: cs:nrpe-73
+    options:
+      hostgroups: "{{ nagios_hostgroups }}"
+      nagios_host_context: "{{ nagios_context }}"
+      # Without this, things get confused because we use placement to
+      # deploy several different principal charms onto the same machines,
+      # and we end up with mismatches between host__* and service__* files
+      # in /var/lib/nagios/export/.
+      nagios_hostname_type: host
+      export_nagios_definitions: true
+      nagios_master: "{{ nagios_master }}"
+  # We need to configure telegraf explicitly here because we use custom
+  # plugins.  The subordinates spec will set up the relations.
+  telegraf:
+    charm: cs:~llama-charmers-next/telegraf
+    expose: true
+    options:
+      extra_plugins: |-
+        [[inputs.statsd]]
+          service_address = ":8125"
+          protocol = "udp"
+  rabbitmq-server:
+    charm: cs:rabbitmq-server
+    constraints: "{{ rabbitmq_constraints }}"
+{%- if devel %}
+    num_units: 1
+{%- else %}
+    num_units: 2
+    options:
+      min-cluster-size: 2
+{%- endif %}
+{%- if stage_name == "production" %}
+    to: ['16', '17']
+{%- endif %}
+{%- if turku_environment_name %}
+  turku-agent:
+    charm: cs:~turku-charmers/turku-agent
+    options:
+      api_auth: include-file://{{local_dir}}/turku.key
+      api_url: https://turku.admin.canonical.com/v1
+      environment_name: "{{ turku_environment_name }}"
+      sources: '{"git": {"comment": "LP Git repositories", "path": "/srv/data/turnip/repos", "snapshot_mode": "link-dest"}}'
+{%- endif %}
+relations:
+  - ["haproxy", "turnip-pack-backend"]
+  - ["haproxy", "turnip-pack-virt:turnip-pack-backend"]
+  - ["haproxy", "turnip-pack-virt:turnip-pack-virt"]
+  - ["haproxy", "turnip-pack-frontend-git:turnip-pack-virt"]
+  - ["haproxy", "turnip-pack-frontend-git:turnip-pack-frontend-git"]
+  - ["haproxy", "turnip-pack-frontend-ssh:turnip-pack-virt"]
+  - ["haproxy", "turnip-pack-frontend-ssh:turnip-pack-frontend-ssh"]
+  - ["haproxy", "turnip-pack-frontend-http:turnip-pack-virt"]
+  - ["haproxy", "turnip-pack-frontend-http:turnip-pack-frontend-http"]
+  - ["haproxy", "turnip-api"]
+{%- if devel %}
+  - ["haproxy", "turnipcake:turnipcake"]
+{%- endif %}
+{%- if not devel %}
+  - ["autocert-haproxy", "haproxy"]
+{%- endif %}
+  - ["nfs-ganesha", "turnip-pack-backend"]
+  - ["nfs-ganesha", "turnip-pack-frontend-http"]
+  - ["nfs-ganesha", "turnip-api"]
+  - ["nfs-ganesha", "turnip-celery"]
+  - ["nrpe", "haproxy:nrpe-external-master"]
+{%- if not devel %}
+  - ["nrpe", "autocert-haproxy:nrpe-external-master"]
+{%- endif %}
+  - ["nrpe", "nfs-ganesha"]
+  - ["nrpe", "rabbitmq-server"]
+  - ["nrpe", "turnip-pack-backend"]
+  - ["nrpe", "turnip-pack-virt"]
+  - ["nrpe", "turnip-pack-frontend-git"]
+  - ["nrpe", "turnip-pack-frontend-ssh"]
+  - ["nrpe", "turnip-pack-frontend-http"]
+  - ["nrpe", "turnip-api"]
+  - ["nrpe", "turnip-celery"]
+{%- if devel %}
+  - ["turnip-api", "turnipcake"]
+{%- endif %}
+  - ['rabbitmq-server:amqp', "turnip-api:amqp"]
+  - ['rabbitmq-server:amqp', "turnip-celery:amqp"]
+{%- if turku_environment_name %}
+  - ["turku-agent", "nfs-ganesha"]
+{%- endif %}
diff --git a/lp-git/collect-charm-upgrades b/lp-git/collect-charm-upgrades
new file mode 100644
index 0000000..21c5164
--- /dev/null
+++ b/lp-git/collect-charm-upgrades
@@ -0,0 +1,2 @@
+# Add any charm upgrades which are part of the current deployment.
+# This file can be empty when there are none.
diff --git a/lp-git/collect-charms b/lp-git/collect-charms
new file mode 100644
index 0000000..ba6156e
--- /dev/null
+++ b/lp-git/collect-charms
@@ -0,0 +1,9 @@
+haproxy				lp:~ubuntuone-pqm-team/charm-haproxy/snap-store;revno=119,overwrite=True
+turnip-pack-backend		git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-pack-backend;revno=build/80bf3cdc8ada3e002c0e492f423942b5e3be4fa7
+turnip-pack-virt		git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-pack-virt;revno=build/80bf3cdc8ada3e002c0e492f423942b5e3be4fa7
+turnip-pack-frontend-git	git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-pack-frontend-git;revno=build/80bf3cdc8ada3e002c0e492f423942b5e3be4fa7
+turnip-pack-frontend-ssh	git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-pack-frontend-ssh;revno=build/80bf3cdc8ada3e002c0e492f423942b5e3be4fa7
+turnip-pack-frontend-http	git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-pack-frontend-http;revno=build/80bf3cdc8ada3e002c0e492f423942b5e3be4fa7
+turnip-api			git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-api;revno=build/80bf3cdc8ada3e002c0e492f423942b5e3be4fa7
+turnip-celery			git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnip-celery;revno=build/f51db0b27412c8818dfeec8b3659f1f7bcba9bf6
+turnipcake			git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/charm-build-turnipcake;revno=build/0aff4020a3b69b46cca5c6da2e4677bf1c22ab01
diff --git a/lp-git/collect-payload b/lp-git/collect-payload
new file mode 100644
index 0000000..5a5d963
--- /dev/null
+++ b/lp-git/collect-payload
@@ -0,0 +1,3 @@
+turnip-code-tip          git+https://git.launchpad.net/turnip
+turnipcake-code-tip      git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/turnipcake
+turnip-dependencies      git+https://git.launchpad.net/~canonical-launchpad-branches/turnip/+git/dependencies
diff --git a/lp-git/configs/custom-secgroups-production-ps5.yaml b/lp-git/configs/custom-secgroups-production-ps5.yaml
new file mode 100644
index 0000000..f999dcb
--- /dev/null
+++ b/lp-git/configs/custom-secgroups-production-ps5.yaml
@@ -0,0 +1,58 @@
+applications:
+    haproxy:
+        type: neutron
+        rules:
+            - turnip-services
+    rabbitmq-server:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-backend:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-virt:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-git:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-ssh:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-http:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-api:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-celery:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
+    turnip-services:
+        # Public HTTP.
+        - {"protocol": "tcp", "family": "IPv4", "port": 80, "cidr": "0.0.0.0/0"}
+        # Public HTTPS.
+        - {"protocol": "tcp", "family": "IPv4", "port": 443, "cidr": "0.0.0.0/0"}
+        # Public git protocol.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9418, "cidr": "0.0.0.0/0"}
+        # Public SSH.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9422, "cidr": "0.0.0.0/0"}
+        # Allow app servers (chaenomeles, gac, soybean, wampee) and script
+        # servers (ackee, loganberry) to use the internal API.
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.119/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.90.20/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.57/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.61/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.26/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.90.37/32"}
diff --git a/lp-git/configs/custom-secgroups-production.yaml b/lp-git/configs/custom-secgroups-production.yaml
new file mode 100644
index 0000000..6f77ef2
--- /dev/null
+++ b/lp-git/configs/custom-secgroups-production.yaml
@@ -0,0 +1,58 @@
+applications:
+    haproxy:
+        type: iptables
+        rules:
+            - turnip-services
+    rabbitmq-server:
+        type: iptables
+        rules:
+            - rsync-logs
+    turnip-pack-backend:
+        type: iptables
+        rules:
+            - rsync-logs
+    turnip-pack-virt:
+        type: iptables
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-git:
+        type: iptables
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-ssh:
+        type: iptables
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-http:
+        type: iptables
+        rules:
+            - rsync-logs
+    turnip-api:
+        type: iptables
+        rules:
+            - rsync-logs
+    turnip-celery:
+        type: iptables
+        rules:
+            - rsync-logs
+rules:
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
+    turnip-services:
+        # Public HTTP.
+        - {"protocol": "tcp", "family": "IPv4", "port": 80, "cidr": "0.0.0.0/0"}
+        # Public HTTPS.
+        - {"protocol": "tcp", "family": "IPv4", "port": 443, "cidr": "0.0.0.0/0"}
+        # Public git protocol.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9418, "cidr": "0.0.0.0/0"}
+        # Public SSH.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9422, "cidr": "0.0.0.0/0"}
+        # Allow app servers (chaenomeles, gac, soybean, wampee) and script
+        # servers (ackee, loganberry) to use the internal API.
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.119/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.90.20/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.57/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.61/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.89.26/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.90.37/32"}
diff --git a/lp-git/configs/custom-secgroups-qastaging-ps5.yaml b/lp-git/configs/custom-secgroups-qastaging-ps5.yaml
new file mode 120000
index 0000000..fa0da02
--- /dev/null
+++ b/lp-git/configs/custom-secgroups-qastaging-ps5.yaml
@@ -0,0 +1 @@
+custom-secgroups-qastaging.yaml
\ No newline at end of file
diff --git a/lp-git/configs/custom-secgroups-qastaging.yaml b/lp-git/configs/custom-secgroups-qastaging.yaml
new file mode 100644
index 0000000..148a150
--- /dev/null
+++ b/lp-git/configs/custom-secgroups-qastaging.yaml
@@ -0,0 +1,71 @@
+applications:
+    nrpe:
+        type: neutron
+        rules:
+            - nagios-monitored
+    haproxy:
+        type: neutron
+        rules:
+            - turnip-services
+    rabbitmq-server:
+        type: neutron
+        rules:
+            - rsync-logs
+    telegraf:
+        type: neutron
+        rules:
+            - metrics
+    turnip-pack-backend:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-virt:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-git:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-ssh:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-pack-frontend-http:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-api:
+        type: neutron
+        rules:
+            - rsync-logs
+    turnip-celery:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    nagios-monitored:
+        # Allow monitoring from wendigo.
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "91.189.90.53/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "91.189.90.53/32"}
+    metrics:
+        # Allow prometheus on cyrano and mairu to scrape telegraf.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "91.189.95.24/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 9103, "cidr": "91.189.94.60/32"}
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
+    turnip-services:
+        # Public HTTP.
+        - {"protocol": "tcp", "family": "IPv4", "port": 80, "cidr": "0.0.0.0/0"}
+        # Public HTTPS.
+        - {"protocol": "tcp", "family": "IPv4", "port": 443, "cidr": "0.0.0.0/0"}
+        # Public git protocol.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9418, "cidr": "0.0.0.0/0"}
+        # Public SSH.
+        - {"protocol": "tcp", "family": "IPv4", "port": 9422, "cidr": "0.0.0.0/0"}
+        # Allow script servers (atemoya, gandwana) to use the internal API.
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "91.189.94.52/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 19417, "cidr": "10.22.112.8/32"}
diff --git a/lp-git/make-lxd-profile b/lp-git/make-lxd-profile
new file mode 100755
index 0000000..e53e573
--- /dev/null
+++ b/lp-git/make-lxd-profile
@@ -0,0 +1,35 @@
+#! /usr/bin/python3
+
+import json
+import subprocess
+from textwrap import dedent
+
+
+def profile_exists(profile_name):
+    return subprocess.call(
+        ["lxc", "profile", "show", profile_name],
+        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
+
+
+def profile_set(profile_name, key, value):
+    subprocess.check_call(["lxc", "profile", "set", profile_name, key, value])
+
+
+model = json.loads(
+    subprocess.check_output(["juju", "show-model", "--format=json"]))
+model_name = list(model.values())[0]["short-name"]
+profile_name = "juju-{}".format(model_name)
+if not profile_exists(profile_name):
+    subprocess.check_call(["lxc", "profile", "create", profile_name])
+profile_set(
+    profile_name, "raw.apparmor",
+    dedent("""\
+        mount fstype=nfs,
+        mount fstype=nfs4,
+        mount fstype=nfsd,
+        mount fstype=rpc_pipefs,
+        """))
+profile_set(profile_name, "security.privileged", "true")
diff --git a/lp-git/manifest b/lp-git/manifest
new file mode 120000
index 0000000..6893813
--- /dev/null
+++ b/lp-git/manifest
@@ -0,0 +1 @@
+manifests/ci-run
\ No newline at end of file
diff --git a/lp-git/manifest-perform-autodeploy b/lp-git/manifest-perform-autodeploy
new file mode 120000
index 0000000..2290e8d
--- /dev/null
+++ b/lp-git/manifest-perform-autodeploy
@@ -0,0 +1 @@
+manifests/update-code-asset
\ No newline at end of file
diff --git a/lp-git/manifest-verify b/lp-git/manifest-verify
new file mode 120000
index 0000000..6e02de4
--- /dev/null
+++ b/lp-git/manifest-verify
@@ -0,0 +1 @@
+manifests/verify
\ No newline at end of file
diff --git a/lp-git/manifests/ci-run b/lp-git/manifests/ci-run
new file mode 100644
index 0000000..2779c42
--- /dev/null
+++ b/lp-git/manifests/ci-run
@@ -0,0 +1,4 @@
+include config=manifests/initial-deployment
+include config=manifests/upgrade-deployment
+include config=manifests/update-code-asset
+include config=manifests/run-payload-tests
diff --git a/lp-git/manifests/initial-deployment b/lp-git/manifests/initial-deployment
new file mode 100644
index 0000000..971219d
--- /dev/null
+++ b/lp-git/manifests/initial-deployment
@@ -0,0 +1,12 @@
+collect config=collect-payload
+script config=utils/make-branches BRANCH=qastaging
+collect config=collect-charms
+script config=predeploy
+# nfs-ganesha sometimes gets stuck in "maintenance" / "Updating apt cache";
+# tolerate that for now.
+# Code deployment takes ~1h, so we set a 1.5h (5400s) timeout.
+bundle config=bundle.yaml local=deploy-secrets status-timeout=5400 max-wait=900 additional-ready-states=maintenance
+juju-check-wait
+script config=postdeploy
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-git/manifests/rollback-code-asset b/lp-git/manifests/rollback-code-asset
new file mode 100644
index 0000000..69127bc
--- /dev/null
+++ b/lp-git/manifests/rollback-code-asset
@@ -0,0 +1,11 @@
+# Check for juju status errors, but don't verify service health; something
+# is presumably wrong given that we're rolling back.
+juju-check-wait
+
+# Roll back the code asset.  The caller must set the BUILD_LABEL environment
+# variable to specify the rollback target.
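+# For example (illustrative; the label must name a build that already exists
+# in the turnip-builds Swift container): BUILD_LABEL=turnip=<build-label>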
+script config=update-code-asset ROLLBACK=true
+
+include config=manifests/verify
diff --git a/lp-git/manifests/run-payload-tests b/lp-git/manifests/run-payload-tests
new file mode 100644
index 0000000..7731a73
--- /dev/null
+++ b/lp-git/manifests/run-payload-tests
@@ -0,0 +1 @@
+script config=run-payload-tests SERVICE=turnip-pack-backend
diff --git a/lp-git/manifests/secgroups b/lp-git/manifests/secgroups
new file mode 100644
index 0000000..8c438ee
--- /dev/null
+++ b/lp-git/manifests/secgroups
@@ -0,0 +1 @@
+script config=utils/custom-secgroups.py SKIP_STAGES=devel
diff --git a/lp-git/manifests/update-code-asset b/lp-git/manifests/update-code-asset
new file mode 100644
index 0000000..95be0cf
--- /dev/null
+++ b/lp-git/manifests/update-code-asset
@@ -0,0 +1,7 @@
+include config=manifests/verify
+
+# Update the code asset, by default to the latest successfully tested asset,
+# but with the option to override.
+script config=update-code-asset
+
+include config=manifests/verify
diff --git a/lp-git/manifests/upgrade-deployment b/lp-git/manifests/upgrade-deployment
new file mode 100644
index 0000000..a0116c0
--- /dev/null
+++ b/lp-git/manifests/upgrade-deployment
@@ -0,0 +1,14 @@
+include config=manifests/verify
+collect config=collect-payload
+script config=utils/make-branches BRANCH=qastaging
+collect config=collect-charms
+collect config=collect-charm-upgrades
+script config=predeploy
+script config=upgrade-charms
+# nfs-ganesha sometimes gets stuck in "maintenance" / "Updating apt cache";
+# tolerate that for now.
+bundle config=bundle.yaml local=deploy-secrets additional-ready-states=maintenance
+juju-check-wait
+script config=postdeploy
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-git/manifests/verify b/lp-git/manifests/verify
new file mode 100644
index 0000000..73bd3ac
--- /dev/null
+++ b/lp-git/manifests/verify
@@ -0,0 +1,4 @@
+juju-check-wait status-timeout=3600
+# It occasionally takes a little while for all the servers to start
+# accepting connections.
+verify retry=5
diff --git a/lp-git/postdeploy b/lp-git/postdeploy
new file mode 100755
index 0000000..86c04ec
--- /dev/null
+++ b/lp-git/postdeploy
@@ -0,0 +1,13 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+case "${MOJO_STAGE##*/}" in
+    # production is on metal at the moment.
+    qastaging)
+        "$TOP/utils/add-floating-ip" haproxy
+        ;;
+esac
+
+exit 0
diff --git a/lp-git/predeploy b/lp-git/predeploy
new file mode 100755
index 0000000..f098a42
--- /dev/null
+++ b/lp-git/predeploy
@@ -0,0 +1,107 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+create_self_signed () {
+    local dir="$1"
+    local hostname="$2"
+
+    if [ ! -e "${dir}/${hostname}.key" ]; then
+        echo "Generating self-signed key for testing..."
+        openssl req -new -nodes \
+            -keyout "${dir}/${hostname}.key" -out "${dir}/${hostname}.csr" \
+            -subj "/C=ZZ/ST=YY/L=XX/O=The Mojo/OU=Continuous Dis Integration/CN=${hostname}"
+    fi
+    if [ ! -e "${dir}/${hostname}.crt" ]; then
+        echo "Generating self-signed certificate for testing..."
+        openssl x509 -req -days 365 \
+            -in "${dir}/${hostname}.csr" -signkey "${dir}/${hostname}.key" \
+            -out "${dir}/${hostname}.crt"
+    fi
+}
+
+create_ssh_host_key () {
+    local key="$1"
+
+    if [ ! -e "$key" ]; then
+        echo "Generating SSH host key for testing..."
+        ssh-keygen -t rsa -b 2048 -f "$key" -N ''
+    fi
+}
+
+create_cgit_secret () {
+    local secret="$1"
+
+    [ ! -e "$secret" ] || return 0
+
+    # Generate an HMAC secret key used to sign the auth cookie.  The key
+    # generation code is from paste.auth.cookie.  The key should remain
+    # constant across deployments, although when we need to rotate it (at
+    # the cost of terminating any running sessions), we just need to remove
+    # it from ${MOJO_LOCAL_DIR}/cgit-secret and redeploy.
+    echo "Generating cgit secret for testing..."
+    python3 -c '
+import random
+import sys
+
+rnd = random.SystemRandom()
+all_chars = bytes(range(256))
+with open(sys.argv[1], "wb") as f:
+    f.write(bytes(rnd.choice(all_chars) for _ in range(64)))
+' "$secret"
+}
+
+STORAGE_URL="$(${TOP}/utils/get-swift-storage-url)"
+TURNIP_BUILD_LABEL="$(
+    ATTEMPTED_OR_SUCCESSFUL=successful \
+    MOJO_DOWNLOADER_ANONYMOUS=1 \
+    MOJO_DOWNLOADER_CONTAINER_NAME=turnip-builds \
+    ${TOP}/utils/get-last-build-label)"
+TURNIPCAKE_BUILD_LABEL="$(
+    ATTEMPTED_OR_SUCCESSFUL=successful \
+    MOJO_DOWNLOADER_ANONYMOUS=1 \
+    MOJO_DOWNLOADER_CONTAINER_NAME=turnipcake-builds \
+    ${TOP}/utils/get-last-build-label)"
+for service in \
+        turnip-pack-backend \
+        turnip-pack-virt \
+        turnip-pack-frontend-git \
+        turnip-pack-frontend-ssh \
+        turnip-pack-frontend-http \
+        turnip-api \
+        turnip-celery; do
+    ${TOP}/utils/set-local-config --bundle --default turnip "$service" \
+        swift_container_name=turnip-builds \
+        swift_storage_url="${STORAGE_URL}"
+    ${TOP}/utils/set-local-config --bundle turnip "$service" \
+        build_label="${TURNIP_BUILD_LABEL#turnip=}"
+done
+
+AUTOCERT_HOSTNAME=
+case "${MOJO_STAGE##*/}" in
+    devel)
+        create_self_signed "${MOJO_LOCAL_DIR}" git.launchpad.test
+        ${TOP}/utils/set-local-config --bundle --default turnip turnipcake \
+            swift_container_name=turnipcake-builds \
+            swift_storage_url="${STORAGE_URL}"
+        ${TOP}/utils/set-local-config --bundle turnip turnipcake \
+            build_label="${TURNIPCAKE_BUILD_LABEL#turnipcake=}" \
+        ;;
+    qastaging|qastaging-ps5)
+        AUTOCERT_HOSTNAME=git.qastaging.paddev.net
+        ;;
+    production|production-ps5)
+        AUTOCERT_HOSTNAME=git.launchpad.net
+        ;;
+esac
+
+if [ "${AUTOCERT_HOSTNAME}" ]; then
+    echo "${AUTOCERT_HOSTNAME}=$(cat "${MOJO_LOCAL_DIR}/${AUTOCERT_HOSTNAME}.token")" \
+        >"${MOJO_LOCAL_DIR}/autocert-auth-pairs"
+fi
+
+create_ssh_host_key "${MOJO_LOCAL_DIR}/ssh-host-key"
+create_cgit_secret "${MOJO_LOCAL_DIR}/cgit-secret"
+
+exit 0
diff --git a/lp-git/run-payload-tests b/lp-git/run-payload-tests
new file mode 100755
index 0000000..7e61ac4
--- /dev/null
+++ b/lp-git/run-payload-tests
@@ -0,0 +1,18 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os
+import subprocess
+import sys
+
+from utils import utils
+
+
+payload_name = os.environ['SERVICE']
+app_unit = utils.get_first_unit_for_service(payload_name)
+sys.stdout.write("Running payload tests on {}\n".format(app_unit))
+subprocess.check_call([
+    'juju', 'run', '--unit', app_unit,
+    'make -C /srv/turnip/{}/code check'.format(payload_name),
+    ])
diff --git a/lp-git/update-code-asset b/lp-git/update-code-asset
new file mode 100755
index 0000000..12492d9
--- /dev/null
+++ b/lp-git/update-code-asset
@@ -0,0 +1,86 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os
+import subprocess
+import sys
+
+import requests
+
+from utils import utils
+
+
+is_rollback = 'ROLLBACK' in os.environ
+if is_rollback and 'BUILD_LABEL' not in os.environ:
+    sys.stderr.write("Rollback requires specifying BUILD_LABEL.\n")
+    sys.exit(1)
+
+combined_build_label = os.environ.get('BUILD_LABEL', '')
+if not combined_build_label:
+    combined_build_label = utils.get_last_build_label(
+        container_name='turnip-builds',
+        build_type='successful',
+        anonymous=True)
+    if 'turnipcake' in utils.payload_names():
+        combined_build_label = '%s:%s' % (
+            combined_build_label,
+            utils.get_last_build_label(
+                container_name='turnipcake-builds',
+                build_type='successful',
+                anonymous=True))
+if combined_build_label:
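+    # A combined label has the form "turnip=<label>[:turnipcake=<label>]";
+    # split_build_label turns it into a {payload name: label} mapping.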
+    # Verify that the given builds exist on swift.
+    build_labels = utils.split_build_label(combined_build_label)
+    storage_url = utils.get_swift_auth(anonymous=True)[0]
+    for payload_name, payload_label in build_labels.items():
+        container_name = (
+            'turnipcake-builds' if payload_name == 'turnipcake'
+            else 'turnip-builds')
+        object_url = '{}/{}/{}-builds/{}/{}.tar.gz'.format(
+            storage_url, container_name, payload_name, payload_label,
+            payload_name)
+        response = requests.head(object_url)
+        if response.status_code != 200:
+            sys.stdout.write("Unable to fetch {} from Swift: {} {}".format(
+                object_url, response.status_code, response.reason))
+            sys.exit(1)
+else:
+    sys.stdout.write("Unable to find latest build label from swift.")
+    sys.exit(1)
+
+juju_services = utils.juju_services()
+for payload_name, payload_label in build_labels.items():
+    if payload_name == 'turnip':
+        app_names = [
+            'turnip-pack-backend',
+            'turnip-pack-virt',
+            'turnip-pack-frontend-git',
+            'turnip-pack-frontend-ssh',
+            'turnip-pack-frontend-http',
+            'turnip-api',
+            'turnip-celery',
+            ]
+    else:
+        app_names = [payload_name]
+    for app_name in app_names:
+        if app_name not in juju_services:
+            sys.stdout.write("Skipping {} (not deployed).\n".format(app_name))
+            continue
+        current_revid = subprocess.check_output(
+            ['juju', 'config', app_name, 'build_label'],
+            universal_newlines=True).rstrip('\n')
+        if payload_label == current_revid:
+            sys.stdout.write(
+                "The current {} revision ID ({}) already matches {}.\n"
+                "Skipping setting build_label.\n".format(
+                    app_name, current_revid, payload_label))
+            continue
+
+        sys.stdout.write("Updating {} code asset from {} to {}.\n".format(
+            app_name, current_revid, payload_label))
+        subprocess.check_call([
+            'juju', 'config', app_name,
+            'build_label={}'.format(payload_label)])
diff --git a/lp-git/upgrade-charms b/lp-git/upgrade-charms
new file mode 100755
index 0000000..5ce2710
--- /dev/null
+++ b/lp-git/upgrade-charms
@@ -0,0 +1,29 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os.path
+import subprocess
+
+from utils import utils
+
+
+juju_services = utils.juju_services()
+app_names = [
+    'nfs-ganesha',
+    'turnip-api',
+    'turnip-pack-backend',
+    'turnip-pack-frontend-git',
+    'turnip-pack-frontend-http',
+    'turnip-pack-frontend-ssh',
+    'turnip-pack-virt',
+    'turnip-celery',
+    ]
+
+for app_name in app_names:
+    if app_name not in juju_services:
+        continue
+    subprocess.check_call([
+        'juju', 'upgrade-charm', app_name,
+        '--path=%s' % os.path.join(
+            os.environ['MOJO_REPO_DIR'], os.environ['MOJO_SERIES'], app_name)])
diff --git a/lp-git/utils b/lp-git/utils
new file mode 120000
index 0000000..468ba70
--- /dev/null
+++ b/lp-git/utils
@@ -0,0 +1 @@
+../utils
\ No newline at end of file
diff --git a/lp-git/verify b/lp-git/verify
new file mode 100755
index 0000000..df9e6ef
--- /dev/null
+++ b/lp-git/verify
@@ -0,0 +1,10 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
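+# Always skip the swap check; the ${EXTRA_SKIP_CHECKS:+|...} expansion keeps
+# any caller-supplied skip list, joined with "|".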
+export EXTRA_SKIP_CHECKS="check_swap${EXTRA_SKIP_CHECKS:+|${EXTRA_SKIP_CHECKS}}"
+
+exec "$TOP/utils/verify"
diff --git a/lp-signing/README.md b/lp-signing/README.md
new file mode 100644
index 0000000..7c1b22c
--- /dev/null
+++ b/lp-signing/README.md
@@ -0,0 +1,36 @@
+# Launchpad signing service
+
+This spec deploys the Launchpad signing service.
+
+You can run it locally using Juju's LXD support and Mojo.  First, configure
+your environment to download payload builds from the output of our Jenkins
+jobs:
+
+    export MOJO_ROOT="$HOME/.local/share/mojo"
+    export MOJO_DOWNLOADER_STORAGE_URL=https://objectstorage.prodstack4-5.canonical.com/v1/AUTH_41514c3d240e4112923b17f0f431284f
+    export MOJO_DOWNLOADER_CONTAINER_NAME=lp-signing-builds
+
+Then run the spec using Mojo:
+
+    mojo project-new -s bionic -c containerless mojo-lp-signing
+    mojo workspace-new -p mojo-lp-signing -s bionic --stage lp-signing/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+    mojo run -p mojo-lp-signing -s bionic --stage lp-signing/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+
+You must have python-requests, python-yaml, and python-swiftclient
+installed.
+
+This spec stores its code payload in Swift, so you must have suitable
+credentials in your environment.  The easiest way to handle this for
+development is to [deploy a local Swift instance
+first](https://www.chiark.greenend.org.uk/~cjwatson/blog/deploying-swift.html).
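+
+For development against a local Swift, you may also need OpenStack-style
+credentials exported in your environment, along these lines (the values are
+illustrative; use whatever credentials your Keystone actually issues):
+
+    export OS_AUTH_URL=http://<keystone-host>:5000/v3
+    export OS_USERNAME=<swift-username>
+    export OS_PASSWORD=<swift-password>
+    export OS_PROJECT_NAME=<swift-project>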
diff --git a/lp-signing/collect-charm-upgrades b/lp-signing/collect-charm-upgrades
new file mode 100644
index 0000000..7155c54
--- /dev/null
+++ b/lp-signing/collect-charm-upgrades
@@ -0,0 +1,4 @@
+# Add any charm upgrades which are part of the current deployment.
+# This file can be empty when there are none.
+haproxy				lp:~ubuntuone-pqm-team/charm-haproxy/snap-store;revno=119,overwrite=True
+lp-signing			git+https://git.launchpad.net/~launchpad/lp-signing/+git/charm-build-lp-signing;revno=master,overwrite=True
diff --git a/lp-signing/collect-charms b/lp-signing/collect-charms
new file mode 100644
index 0000000..12a2a9d
--- /dev/null
+++ b/lp-signing/collect-charms
@@ -0,0 +1,23 @@
+# Services
+haproxy				lp:~ubuntuone-pqm-team/charm-haproxy/snap-store;revno=119,overwrite=True
+postgresql			cs:postgresql-203
+lp-signing			git+https://git.launchpad.net/~launchpad/lp-signing/+git/charm-build-lp-signing;revno=build/530adb773210eacc6f710dfa8d38872c08da5023
+
+# Subordinates
+nrpe				cs:nrpe-52
+ntp				cs:ntp
+telegraf			cs:telegraf-27
+turku-agent			cs:~turku-charmers/turku-agent
+ubuntu				cs:ubuntu
+
+{%- if stage_name != "devel" %}
+# Basenode
+haproxy/exec.d					@
+haproxy/exec.d/basenode				lp:basenode;revno=109
+postgresql/exec.d				@
+postgresql/exec.d/basenode			lp:basenode;revno=109
+lp-signing/exec.d				@
+lp-signing/exec.d/basenode			lp:basenode;revno=109
+ubuntu/exec.d					@
+ubuntu/exec.d/basenode				lp:basenode
+{%- endif %}
diff --git a/lp-signing/collect-payload b/lp-signing/collect-payload
new file mode 100644
index 0000000..e0d1c1a
--- /dev/null
+++ b/lp-signing/collect-payload
@@ -0,0 +1,2 @@
+lp-signing-code-tip		git+https://git.launchpad.net/lp-signing
+lp-signing-dependencies		git+https://git.launchpad.net/~launchpad/lp-signing/+git/dependencies
diff --git a/lp-signing/configs/custom-secgroups-production.yaml b/lp-signing/configs/custom-secgroups-production.yaml
new file mode 100644
index 0000000..68736b0
--- /dev/null
+++ b/lp-signing/configs/custom-secgroups-production.yaml
@@ -0,0 +1,21 @@
+applications:
+    haproxy:
+        type: iptables
+        rules:
+            - lp-signing-services
+    lp-signing:
+        type: iptables
+        rules:
+            - rsync-logs
+rules:
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
+    lp-signing-services:
+        # Allow Soyuz servers (anonster, haetae, ganondorf, chakron,
+        # bilimbi) to use the internal API.
+        - {"protocol": "tcp", "family": "IPv4", "port": 8000, "cidr": "91.189.89.100/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 8000, "cidr": "91.189.95.83/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 8000, "cidr": "91.189.95.85/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 8000, "cidr": "91.189.94.85/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 8000, "cidr": "91.189.89.32/32"}
diff --git a/lp-signing/configs/custom-secgroups-staging.yaml b/lp-signing/configs/custom-secgroups-staging.yaml
new file mode 100644
index 0000000..a304eca
--- /dev/null
+++ b/lp-signing/configs/custom-secgroups-staging.yaml
@@ -0,0 +1,26 @@
+applications:
+    nrpe:
+        type: neutron
+        rules:
+            - nagios-monitored
+    haproxy:
+        type: neutron
+        rules:
+            - lp-signing-services
+    lp-signing:
+        type: neutron
+        rules:
+            - rsync-logs
+rules:
+    nagios-monitored:
+        # Allow monitoring from wendigo.
+        - {"protocol": "tcp", "family": "IPv4", "port": 22, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.53/32"}
+        - {"protocol": "tcp", "family": "IPv4", "port": 5666, "cidr": "91.189.90.53/32"}
+        - {"protocol": "icmp", "family": "IPv4", "cidr": "91.189.90.53/32"}
+    rsync-logs:
+        # Allow carob to fetch logs.
+        - {"protocol": "tcp", "family": "IPv4", "port": 873, "cidr": "91.189.90.14/32"}
+    lp-signing-services:
+        # Allow dogfood servers (labbu) to use the internal API.
+        - {"protocol": "tcp", "family": "IPv4", "port": 8000, "cidr": "91.189.90.132/32"}
diff --git a/lp-signing/golive/collect b/lp-signing/golive/collect
new file mode 100644
index 0000000..c20a2cb
--- /dev/null
+++ b/lp-signing/golive/collect
@@ -0,0 +1,8 @@
+# These are unpinned on the assumption that they should always be the latest when golive
+# is added.
+canonical-livepatch	cs:canonical-livepatch
+landscape-client	cs:landscape-client
+userdir-ldap		lp:userdir-ldap-charms/userdir-ldap
+{%- if stage_name == "production" %}
+hp-health		lp:canonical-hp-health-charm/built
+{%- endif %}
diff --git a/lp-signing/golive/manifest b/lp-signing/golive/manifest
new file mode 100644
index 0000000..93c9f76
--- /dev/null
+++ b/lp-signing/golive/manifest
@@ -0,0 +1,3 @@
+collect config=golive/collect
+deploy config=golive/services target=golive
+include config=manifests/verify
diff --git a/lp-signing/golive/services b/lp-signing/golive/services
new file mode 100644
index 0000000..b7ceb90
--- /dev/null
+++ b/lp-signing/golive/services
@@ -0,0 +1,66 @@
+{%- if stage_name == "production" %}
+{%-   set environment_tag = "devops-production" %}
+{%- elif stage_name == "staging" %}
+{%-   set environment_tag = "devops-staging" %}
+{%- else %}
+{%-   set environment_tag = "devops-unknown" %}
+{%- endif -%}
+golive:
+  series: {{ series }}
+  services:
+    haproxy:
+      charm: haproxy
+    postgresql:
+      charm: postgresql
+    lp-signing:
+      charm: lp-signing
+    nrpe:
+      charm: nrpe
+    landscape:
+      charm: landscape-client
+      options:
+        url: https://landscape.is.canonical.com/message-system
+        ping-url: http://landscape.is.canonical.com/ping
+        account-name: standalone
+        tags: juju-managed, devops-instance, {{environment_tag}}
+        registration-key: include-file://{{local_dir}}/canonical-is-landscape.key
+    canonical-livepatch:
+      charm: canonical-livepatch
+      options:
+        livepatch_key: include-file://{{local_dir}}/canonical-is-livepatch.key
+        livepatch_proxy: http://squid.internal:3128
+        snap_proxy: http://squid.internal:3128
+{%- if stage_name == "production" %}
+    physical-hosts:
+      charm: ubuntu
+    nrpe-physical:
+      charm: nrpe
+    hp-health:
+      charm: hp-health
+      options:
+        hp-firmware-info-url: 'http://archive.admin.canonical.com/other/hp-firmware/info.yaml'
+{%- endif %}
+    userdir-ldap:
+      charm: userdir-ldap
+      options:
+        apt-repo-spec: "deb http://archive.admin.canonical.com/ubuntu {{ series }}-cat main"
+  relations:
+{%- if stage_name != "production" %}
+    - ["canonical-livepatch", "haproxy"]
+    - ["canonical-livepatch", "postgresql"]
+    - ["canonical-livepatch", "lp-signing"]
+    - ["canonical-livepatch", "nrpe"]
+{%- else %}
+    - ["canonical-livepatch", "physical-hosts"]
+    - ["canonical-livepatch", "nrpe-physical"]
+    - ["hp-health", "physical-hosts"]
+    - ["hp-health", "nrpe-physical"]
+    - ["landscape", "physical-hosts"]
+    - ["userdir-ldap", "physical-hosts"]
+{%- endif %}
+    - ["landscape", "haproxy"]
+    - ["landscape", "postgresql"]
+    - ["landscape", "lp-signing"]
+    - ["userdir-ldap", "haproxy"]
+    - ["userdir-ldap", "postgresql"]
+    - ["userdir-ldap", "lp-signing"]
diff --git a/lp-signing/manifest b/lp-signing/manifest
new file mode 120000
index 0000000..6893813
--- /dev/null
+++ b/lp-signing/manifest
@@ -0,0 +1 @@
+manifests/ci-run
\ No newline at end of file
diff --git a/lp-signing/manifest-perform-autodeploy b/lp-signing/manifest-perform-autodeploy
new file mode 120000
index 0000000..2290e8d
--- /dev/null
+++ b/lp-signing/manifest-perform-autodeploy
@@ -0,0 +1 @@
+manifests/update-code-asset
\ No newline at end of file
diff --git a/lp-signing/manifest-verify b/lp-signing/manifest-verify
new file mode 120000
index 0000000..6e02de4
--- /dev/null
+++ b/lp-signing/manifest-verify
@@ -0,0 +1 @@
+manifests/verify
\ No newline at end of file
diff --git a/lp-signing/manifests/ci-run b/lp-signing/manifests/ci-run
new file mode 100644
index 0000000..e8f6454
--- /dev/null
+++ b/lp-signing/manifests/ci-run
@@ -0,0 +1,3 @@
+include config=manifests/initial-deployment
+include config=manifests/upgrade-deployment
+include config=manifests/update-code-asset-ci-run
diff --git a/lp-signing/manifests/initial-deployment b/lp-signing/manifests/initial-deployment
new file mode 100644
index 0000000..55131a8
--- /dev/null
+++ b/lp-signing/manifests/initial-deployment
@@ -0,0 +1,9 @@
+collect config=collect-payload
+script config=utils/make-branches BRANCH=staging
+collect config=collect-charms
+script config=predeploy
+deploy config=services delay=0 local=deploy-secrets max-wait=900
+juju-check-wait
+script config=postdeploy
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-signing/manifests/rollback-code-asset b/lp-signing/manifests/rollback-code-asset
new file mode 100644
index 0000000..ad513ae
--- /dev/null
+++ b/lp-signing/manifests/rollback-code-asset
@@ -0,0 +1,9 @@
+# Check for juju status errors, but don't verify service health; something
+# is presumably wrong given that we're rolling back.
+juju-check-wait
+
+# Roll back the code asset, by default to the asset that we just updated
+# from, but with the option to override.
+script config=update-code-asset ROLLBACK=true
+
+include config=manifests/verify
diff --git a/lp-signing/manifests/secgroups b/lp-signing/manifests/secgroups
new file mode 100644
index 0000000..8c438ee
--- /dev/null
+++ b/lp-signing/manifests/secgroups
@@ -0,0 +1 @@
+script config=utils/custom-secgroups.py SKIP_STAGES=devel
diff --git a/lp-signing/manifests/update-code-asset b/lp-signing/manifests/update-code-asset
new file mode 100644
index 0000000..f3dd46c
--- /dev/null
+++ b/lp-signing/manifests/update-code-asset
@@ -0,0 +1,8 @@
+include config=manifests/verify
+
+# Update the code asset, by default to the latest successfully tested asset,
+# but with the option to override.
+collect config=collect-payload
+script config=update-code-asset
+
+include config=manifests/verify
diff --git a/lp-signing/manifests/update-code-asset-ci-run b/lp-signing/manifests/update-code-asset-ci-run
new file mode 100644
index 0000000..5d54629
--- /dev/null
+++ b/lp-signing/manifests/update-code-asset-ci-run
@@ -0,0 +1,4 @@
+juju-check-wait
+script config=update-code-asset CI_RUN=true
+juju-check-wait
+include config=manifests/verify
diff --git a/lp-signing/manifests/upgrade-deployment b/lp-signing/manifests/upgrade-deployment
new file mode 100644
index 0000000..049ce06
--- /dev/null
+++ b/lp-signing/manifests/upgrade-deployment
@@ -0,0 +1,12 @@
+include config=manifests/verify
+collect config=collect-payload
+script config=utils/make-branches BRANCH=staging
+collect config=collect-charm-upgrades
+script config=predeploy
+script config=upgrade-charms
+script config=utils/set-service-options
+deploy config=services delay=0 local=deploy-secrets
+juju-check-wait
+script config=postdeploy
+include config=manifests/secgroups
+include config=manifests/verify
diff --git a/lp-signing/manifests/verify b/lp-signing/manifests/verify
new file mode 100644
index 0000000..9eba6e4
--- /dev/null
+++ b/lp-signing/manifests/verify
@@ -0,0 +1,4 @@
+juju-check-wait
+# It occasionally takes a little while for all the servers to start
+# accepting connections.
+verify retry=5
diff --git a/lp-signing/postdeploy b/lp-signing/postdeploy
new file mode 100755
index 0000000..4305717
--- /dev/null
+++ b/lp-signing/postdeploy
@@ -0,0 +1,65 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
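+# Each key below is generated once via the lp-signing CLI on the leader
+# unit, cached under MOJO_LOCAL_DIR, and pushed into juju config only if
+# the corresponding private-keys list is still empty.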
+create_service_key () {
+    local key="$1"
+
+    if [ ! -e "$key" ]; then
+        touch "$key"
+        chmod 600 "$key"
+        juju run --unit=lp-signing/leader -- \
+            sh -c 'LC_ALL=C.UTF-8 /srv/lp-signing/code/env/bin/lp-signing generate-key-pair' | \
+            sed -n 's/^Private: *//p' >"$key"
+
+        if [ ! -s "$key" ]; then
+            echo "Failed to generate service private key" >&2
+            rm -f "$key"
+            exit 1
+        fi
+    fi
+
+    if [ "$(juju config lp-signing service_private_keys)" = "[]" ]; then
+        juju config lp-signing service_private_keys="[\"$(cat "$key")\"]"
+        mojo juju-check-wait
+    fi
+}
+
+create_key_storage_key () {
+    local key="$1"
+
+    if [ ! -e "$key" ]; then
+        touch "$key"
+        chmod 600 "$key"
+        juju run --unit=lp-signing/leader -- \
+            sh -c 'LC_ALL=C.UTF-8 /srv/lp-signing/code/env/bin/lp-signing generate-key-pair' | \
+            sed -n 's/^Private: *//p' >"$key"
+
+        if [ ! -s "$key" ]; then
+            echo "Failed to generate key storage private key" >&2
+            rm -f "$key"
+            exit 1
+        fi
+    fi
+
+    if [ "$(juju config lp-signing key_storage_private_keys)" = "[]" ]; then
+        juju config lp-signing key_storage_private_keys="[\"$(cat "$key")\"]"
+        mojo juju-check-wait
+    fi
+}
+
+case "${MOJO_STAGE##*/}" in
+    # production is on metal at the moment.
+    staging)
+        "$TOP/utils/add-floating-ip" haproxy
+        ;;
+esac
+
+create_service_key "${MOJO_LOCAL_DIR}/service-private-key"
+create_key_storage_key "${MOJO_LOCAL_DIR}/key-storage-private-key"
+
+exit 0
diff --git a/lp-signing/predeploy b/lp-signing/predeploy
new file mode 100755
index 0000000..b86de23
--- /dev/null
+++ b/lp-signing/predeploy
@@ -0,0 +1,25 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+case "${MOJO_STAGE##*/}" in
+    devel)
+        branch=tip
+        ;;
+    *)
+        branch=staging
+        ;;
+esac
+
+CONTAINER_NAME="${MOJO_DOWNLOADER_CONTAINER_NAME:-${MOJO_PROJECT}-builds}"
+STORAGE_URL="$(${TOP}/utils/get-swift-storage-url)"
+LP_SIGNING_BUILD_LABEL="$(
+    git -C "${MOJO_BUILD_DIR}/lp-signing-code-${branch}" rev-parse HEAD)"
+${TOP}/utils/set-local-config --default lp-signing lp-signing \
+    swift_container_name="${CONTAINER_NAME}" \
+    swift_storage_url="${STORAGE_URL}"
+${TOP}/utils/set-local-config lp-signing lp-signing \
+    build_label="${LP_SIGNING_BUILD_LABEL}"
+
+exit 0
diff --git a/lp-signing/services b/lp-signing/services
new file mode 100644
index 0000000..8a20d9d
--- /dev/null
+++ b/lp-signing/services
@@ -0,0 +1,140 @@
+{%- if stage_name == "production" %}
+{%-   set devel = False %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set monitoring_allowed_cidr = "127.0.0.1/32 10.172.0.0/16" %}
+{%-   set nagios_context = "lp-prodstack-signing" %}
+{%-   set nagios_hostgroups = "prodstack-lp" %}
+{%-   set nagios_master = "wekufe.canonical.com" %}
+{%-   set nagios_servicegroups = "prompt-critical" %}
+{%- elif stage_name == "staging" %}
+{%-   set devel = False %}
+{%-   set haproxy_constraints = "cores=2 mem=2048M" %}
+{%-   set log_hosts_allow = "carob.canonical.com" %}
+{%-   set monitoring_allowed_cidr = "127.0.0.1/32 10.172.0.0/16" %}
+{%-   set nagios_context = "lp-stagingstack-signing" %}
+{%-   set nagios_hostgroups = "stagingstack-lp" %}
+{%-   set nagios_master = "wendigo.canonical.com" %}
+{%-   set postgresql_constraints = "cores=2 mem=4096M" %}
+{%-   set signing_constraints = "cores=2 mem=4096M" %}
+{%- else %}
+{%-   set devel = True %}
+{%-   set log_hosts_allow = "" %}
+{%-   set monitoring_allowed_cidr = "127.0.0.1/32" %}
+{%-   set nagios_context = "lp-devel-signing" %}
+{%-   set nagios_hostgroups = "devel-lp" %}
+{%- endif -%}
+lp-signing:
+  series: "{{ series }}"
+  services:
+    haproxy:
+      charm: haproxy
+      constraints: "{{ haproxy_constraints }}"
+{%- if devel %}
+      num_units: 1
+{%- else %}
+      num_units: 2
+{%- endif %}
+{%- if stage_name == "production" %}
+      to: ['0', '1']
+{%- endif %}
+      # Don't expose!  The API is private, and access control is managed
+      # manually.
+      options:
+        default_options: ""
+        default_timeouts: "queue 20000, client 3600000, connect 5000, server 3600000"
+        enable_monitoring: True
+        monitoring_allowed_cidr: "{{ monitoring_allowed_cidr }}"
+        monitoring_stats_refresh: 3600
+        nagios_context: "{{ nagios_context }}"
+{%- if nagios_servicegroups %}
+        nagios_servicegroups: "{{ nagios_servicegroups }},{{ nagios_context }}"
+{%- endif %}
+        peering_mode: active-active
+    postgresql:
+      charm: postgresql
+      constraints: "{{ postgresql_constraints }}"
+{%- if devel %}
+      num_units: 1
+{%- else %}
+      num_units: 2
+{%- endif %}
+      options:
+        nagios_context: "{{ nagios_context }}"
+{%- if nagios_servicegroups %}
+        nagios_servicegroups: "{{ nagios_servicegroups }},{{ nagios_context }}"
+{%- endif %}
+{%- if stage_name == "production" %}
+      to: ['2', '3']
+    turku-agent:
+      charm: turku-agent
+      options:
+        environment_name: prod-launchpad-signing
+        api_auth: include-file://{{local_dir}}/turku.key
+        api_url: https://turku.admin.canonical.com/v1
+        sources: '{"postgresql": { "comment": "PostgreSQL dumps", "path": "/var/lib/postgresql/backups", "frequency": "daily"}}'
+{%- endif %}
+    lp-signing:
+      charm: lp-signing
+      constraints: "{{ signing_constraints }}"
+{%- if devel %}
+      num_units: 1
+{%- else %}
+      num_units: 2
+{%- endif %}
+{%- if stage_name == "production" %}
+      to: ['4', '5']
+{%- endif %}
+      options:
+        log_hosts_allow: "{{ log_hosts_allow }}"
+        nagios_context: "{{ nagios_context }}"
+{%- if nagios_servicegroups %}
+        nagios_servicegroups: "{{ nagios_servicegroups }},{{ nagios_context }}"
+{%- endif %}
+        wsgi_logrotate_count: 90
+    nrpe:
+      charm: nrpe
+      options:
+        hostgroups: "{{ nagios_hostgroups }}"
+        nagios_host_context: "{{ nagios_context }}"
+{%- if nagios_master %}
+        export_nagios_definitions: true
+        nagios_master: "{{ nagios_master }}"
+{%- endif %}
+    telegraf:
+      charm: telegraf
+      expose: true
+{%- if stage_name == "production" %}
+    physical-hosts:
+      charm: ubuntu
+      num_units: 2
+      to: ['6', '7']
+    nrpe-physical:
+      charm: nrpe
+      options:
+        export_nagios_definitions: true
+        hostgroups: "{{ nagios_hostgroups }}"
+        nagios_host_context: "{{ nagios_context }}"
+        nagios_master: "{{ nagios_master }}"
+    ntp:
+      charm: ntp
+      options:
+        nagios_context: "{{ nagios_context }}"
+        nagios_servicegroups: "{{ nagios_servicegroups }},{{ nagios_context }}"
+        source: "ntp1.canonical.com ntp2.canonical.com ntp3.canonical.com ntp4.canonical.com"
+{%- endif %}
+  relations:
+    - ["haproxy", "lp-signing"]
+    - ["lp-signing:db", "postgresql:db"]
+    - ["lp-signing:db-admin", "postgresql:db-admin"]
+    - ["nrpe", "haproxy:nrpe-external-master"]
+    - ["nrpe", "postgresql:nrpe-external-master"]
+    - ["nrpe", "lp-signing"]
+    - ["telegraf:haproxy", "haproxy"]
+    - ["telegraf:juju-info", "postgresql:juju-info"]
+    - ["telegraf:juju-info", "lp-signing"]
+{%- if stage_name == "production" %}
+    - ["postgresql", "turku-agent"]
+    - ["physical-hosts", "nrpe-physical"]
+    - ["physical-hosts", "ntp"]
+    - ["physical-hosts", "telegraf"]
+{%- endif %}
diff --git a/lp-signing/update-code-asset b/lp-signing/update-code-asset
new file mode 100755
index 0000000..aebf69b
--- /dev/null
+++ b/lp-signing/update-code-asset
@@ -0,0 +1,64 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os
+import subprocess
+import sys
+
+import requests
+
+from utils import utils
+
+
+container_name = os.environ.get(
+    'MOJO_DOWNLOADER_CONTAINER_NAME', os.environ['MOJO_PROJECT'] + '-builds')
+
+# Get the appropriate deployment artifact from swift.
+is_ci_run = 'CI_RUN' in os.environ
+if is_ci_run:
+    branch = 'tip'
+else:
+    branch = 'staging'
+
+combined_build_label = os.environ.get('BUILD_LABEL', '')
+if not combined_build_label:
+    labels = []
+    for payload_name in utils.payload_names():
+        revid = utils.get_code_branch_revision(payload_name, branch=branch)
+        labels.append('{}={}'.format(payload_name, revid))
+    combined_build_label = ':'.join(labels)
+if combined_build_label:
+    # Verify that the given builds exist on swift.
+    build_labels = utils.split_build_label(combined_build_label)
+    storage_url = utils.get_swift_auth(anonymous=True)[0]
+    for payload_name, payload_label in build_labels.items():
+        object_url = '{}/{}/{}-builds/{}/{}.tar.gz'.format(
+            storage_url, container_name, payload_name, payload_label,
+            payload_name)
+        response = requests.head(object_url)
+        if response.status_code != 200:
+            sys.stdout.write("Unable to fetch {} from Swift: {} {}".format(
+                object_url, response.status_code, response.reason))
+            sys.exit(1)
+else:
+    sys.stdout.write("Unable to find latest build label from swift.")
+    sys.exit(1)
+
+juju_services = utils.juju_services()
+for payload_name, payload_label in build_labels.items():
+    current_revid = subprocess.check_output(
+        ['juju', 'config', 'lp-signing', 'build_label'],
+        universal_newlines=True).rstrip('\n')
+    if payload_label == current_revid:
+        sys.stdout.write(
+            "The current lp-signing revision ID ({}) already matches {}.\n"
+            "Skipping setting build_label.\n".format(
+                current_revid, payload_label))
+        continue
+
+    sys.stdout.write("Updating lp-signing code asset from {} to {}.\n".format(
+        current_revid, payload_label))
+    subprocess.check_call([
+        'juju', 'config', 'lp-signing',
+        'build_label={}'.format(payload_label)])
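For reference, the combined build label assembled above is a
colon-separated list of name=revision pairs.  A minimal sketch of the
round trip, assuming utils.split_build_label() (defined in
utils/utils.py beyond the excerpt shown here) simply inverts that
construction:

    # Sketch of the combined build label format built above; assumes
    # split_build_label() inverts the '{}={}'.format()/':'.join() steps.
    def split_build_label(combined):
        # "lp-signing=abc123:other=def456" -> {"lp-signing": "abc123", ...}
        return dict(part.split("=", 1) for part in combined.split(":"))

    assert split_build_label("lp-signing=abc123") == {"lp-signing": "abc123"}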
diff --git a/lp-signing/upgrade-charms b/lp-signing/upgrade-charms
new file mode 100755
index 0000000..9eff184
--- /dev/null
+++ b/lp-signing/upgrade-charms
@@ -0,0 +1,24 @@
+#! /usr/bin/python3
+
+import utils.check_version  # noqa: F401
+
+import os.path
+import subprocess
+
+from utils import utils
+
+
+juju_services = utils.juju_services()
+app_names = [
+    'haproxy',
+    'lp-signing',
+    'postgresql',
+    ]
+
+for app_name in app_names:
+    if app_name not in juju_services:
+        continue
+    subprocess.check_call([
+        'juju', 'upgrade-charm', app_name,
+        '--path=%s' % os.path.join(
+            os.environ['MOJO_REPO_DIR'], os.environ['MOJO_SERIES'], app_name)])
diff --git a/lp-signing/utils b/lp-signing/utils
new file mode 120000
index 0000000..468ba70
--- /dev/null
+++ b/lp-signing/utils
@@ -0,0 +1 @@
+../utils
\ No newline at end of file
diff --git a/lp-signing/verify b/lp-signing/verify
new file mode 100755
index 0000000..07173e1
--- /dev/null
+++ b/lp-signing/verify
@@ -0,0 +1,11 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+# Set EXTRA_SKIP_CHECKS, using '|' as the separator. e.g.
+# EXTRA_SKIP_CHECKS='check_ntpmon|check_hardware' mojo run -m manifest-verify
+
+export EXTRA_SKIP_CHECKS="check_swap${EXTRA_SKIP_CHECKS:+|${EXTRA_SKIP_CHECKS}}"
+
+exec "$TOP/utils/verify"
diff --git a/lp-webhooks-proxy/README.md b/lp-webhooks-proxy/README.md
new file mode 100644
index 0000000..88578dc
--- /dev/null
+++ b/lp-webhooks-proxy/README.md
@@ -0,0 +1,17 @@
+# Launchpad webhooks proxy
+
+This spec runs a simple Squid forward proxy.  Its purpose is to allow
+Launchpad's script servers to have outbound HTTP POST access to the internet
+for the purposes of dispatching webhooks, without making it possible to send
+malicious POST requests to internal hosts that trust the script servers.
+
+You can run it locally using Mojo and Juju's LXD support:
+
+    mojo project-new -s bionic -c containerless mojo-lp-webhooks-proxy
+    mojo workspace-new -p mojo-lp-webhooks-proxy -s bionic \
+        --stage lp-webhooks-proxy/devel \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
+    mojo run -p mojo-lp-webhooks-proxy -s bionic \
+        --stage lp-webhooks-proxy/devel \
+        -m manifests/initial-deployment \
+        git+https://git.launchpad.net/launchpad-mojo-specs devel
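Once deployed, a script server treats this as an ordinary HTTP forward
proxy.  A hedged sketch of what a webhook delivery through it might look
like (the proxy address "squid.internal:3128" is a placeholder, not a
documented endpoint of this deployment; Squid's default port is assumed):

    # Hypothetical client-side view of a webhook delivery via the proxy.
    import requests

    proxies = {
        "http": "http://squid.internal:3128",
        "https": "http://squid.internal:3128",
    }
    # Deliveries to allowed destinations succeed; POSTs aimed at internal
    # hosts are denied by the auth_list rules in bundle.yaml below.
    response = requests.post(
        "https://example.com/webhook", json={"event": "ping"},
        proxies=proxies, timeout=30)
    print(response.status_code)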
diff --git a/lp-webhooks-proxy/bundle.yaml b/lp-webhooks-proxy/bundle.yaml
new file mode 100644
index 0000000..3ff0e1b
--- /dev/null
+++ b/lp-webhooks-proxy/bundle.yaml
@@ -0,0 +1,150 @@
+{%- if stage_name in ("production", "staging") %}
+{%-   set devel = False %}
+{%-   set nagios_check_url = "http://bazaar-vcs.org/"; %}
+{#-   bazaar-vcs.org, a benign host (non-cdn) for nagios checks. #}
+{%-   set nagios_allow_hosts = "91.189.90.161" %}
+{%- else %}
+{%-   set devel = True %}
+{%-   set nagios_check_url = "http://ppa.launchpad.net/launchpad/ppa/ubuntu/"; %}
+{#-   ppa.launchpad.net, a benign host for nagios checks. #}
+{%-   set nagios_allow_hosts = "91.189.95.83, 91.189.95.85, 91.189.94.85" %}
+{%- endif %}
+{%- if stage_name == "production" %}
+{%-   set nagios_context = "lp-prodstack-webhooks-proxy" %}
+{%-   set nagios_hostgroups = "prodstack-lp" %}
+{%-   set nagios_master = "wekufe.canonical.com,nagios.ps5.internal" %}
+{#-   localhost, ackee, loganberry #}
+{%-   set trusted_hosts = "127.0.0.0/8, 91.189.89.26, 91.189.90.37" %}
+{%- elif stage_name == "staging" %}
+{%-   set nagios_context = "lp-stagingstack-webhooks-proxy" %}
+{%-   set nagios_hostgroups = "stagingstack-lp" %}
+{%-   set nagios_master = "wendigo.canonical.com" %}
+{#-   localhost, atemoya, gandwana, labbu #}
+{%-   set trusted_hosts = "127.0.0.0/8, 91.189.94.52, 10.22.112.8, 91.189.90.132" %}
+{%- else %}
+{%-   set nagios_context = "lp-devel-webhooks-proxy" %}
+{%-   set nagios_hostgroups = "devel-lp" %}
+{#-   The configured nagios_master doesn't have to be real, but it does have
+      to resolve. #}
+{%-   set nagios_master = "localhost" %}
+{#-   localhost #}
+{%-   set trusted_hosts = "127.0.0.0/8, 10.0.0.0/8" %}
+{%- endif -%}
+series: {{ series }}
+applications:
+  squid:
+    charm: cs:squid-reverseproxy-15
+    expose: true
+{%- if devel %}
+    num_units: 1
+{%- else %}
+    num_units: 2
+{%- endif %}
+    options:
+      auth_list: |
+        # Allow management by a local squidclient.
+        - all-of: ["manager localhost"]
+          http_access: allow
+        - "!src": [{{ trusted_hosts }}]
+          http_access: deny
+        # Allow hosts on denied networks.  dstdomain is unsafe.
+        # DO NOT USE DSTDOMAIN.  For URLs containing IP addresses, Squid
+        # will look up the PTR and match it against dstdomain rules!  This
+        # is obviously completely holey.
+        - port: [80, 443]
+          method: [GET, POST, CONNECT]
+          # DSTDOMAIN IS FORBIDDEN.
+          dst: [
+            {{ nagios_allow_hosts }},
+            # build.staging.snapcraft.io.
+            185.125.191.207, 185.125.191.208,
+            # build.snapcraft.io.
+            162.213.33.209, 162.213.33.210,
+            # ubuntu.com / staging.ubuntu.com, used for webhooks related
+            # to the image builder service.
+            91.189.88.180, 91.189.88.181, 91.189.91.44, 91.189.91.45,
+            185.125.191.38,
+            # Kernel team receiver - services/cdo/kernel-testing/web-servers
+            # https://answers.launchpad.net/launchpad/+question/692191
+            10.15.182.10,
+            ]
+          http_access: allow
+{%- if not devel %}
+        # Allow various Jenkins instances.
+        - port: [8080]
+          method: [GET, POST, CONNECT]
+          dst: [
+            # is-mojo-ci-jenkins-be.internal.
+            10.25.200.132,
+            # webteam-jenkins-be.internal.
+            10.25.200.74,
+            # online-services-jenkins-be.internal.
+            10.15.248.2,
+            # oil-jenkins.internal.
+            10.50.72.13,
+            ]
+          http_access: allow
+        # Allow commercial-systems. This has external (non-Canonical) access
+        # so is configured for 443/SSL only in apache.
+        - port: [443]
+          method: [GET, POST, CONNECT]
+          dst: [
+            # jenkins-webhooks.commercial-systems.canonical.com
+            162.213.33.181,
+            ]
+          http_access: allow
+{%- endif %}
+        # Forbid internal networks since a lot of systems assume that our
+        # entire PI space is trustworthy.  Problematic for webhooks to
+        # internal services, but the rules above allow those addresses.
+        - dst: [
+            # IANA reserved, special and private networks.
+            0.0.0.0/8, 10.0.0.0/8, 169.254.0.0/16,
+            172.16.0.0/12, 192.168.0.0/16, 127.0.0.0/8,
+            224.0.0.0/4, 240.0.0.0/4,
+            # Canonical networks.
+            91.189.88.0/21, 162.213.32.0/22, 185.125.188.0/22,
+            194.169.254.0/24,
+            "2001:67c:1360::/48", "2001:67c:1560::/46",
+            "2620:2d:4000::/44", "2a06:bc80::/29",
+            ]
+          http_access: deny
+        # IPv6 is not supported in PS4.5, but disallowing it entirely is
+        # not an option as squid will reject anything with an AAAA even if
+        # it has As too.
+        #
+        # The "ipv4" matcher does not work, and "ipv6" is a little too
+        # liberal: within the IPv6 address space we only want 2000::/3.
+        # "0.0.0.0/0" is internally overridden to "all", matching IPv6
+        # too, so we instead use "0.0.0.0/1" and "128.0.0.0/1" to match
+        # IPv4.
+        #
+        # Note also that Squid internally handles IPv4 by using its
+        # mapping into IPv6, so if you deny ::/3 before allowing IPv4 you
+        # will have a bad time.
+        - port: [80, 443]
+          method: [GET, POST, CONNECT]
+          dst: [0.0.0.0/1, 128.0.0.0/1, "2000::/3"]
+          http_access: allow
+        # Deny anything with any IP address that has not matched already.
+        - dst: ["::/0"]
+          http_access: deny
+        # Allow anything leftover.  The FQDN probably has no IP address,
+        # so we want to return a DNS error rather than a permission
+        # violation.
+        - port: [80, 443]
+          method: [GET, POST, CONNECT]
+          http_access: allow
+      enable_forward_proxy: true
+      nagios_check_http_params: "-I localhost -p 3128 -u {{ nagios_check_url }}"
+      nagios_context: {{ nagios_context }}
+      port_options: ""
+  nrpe:
+    charm: cs:nrpe-73
+    options:
+      export_nagios_definitions: true
+      hostgroups: {{ nagios_hostgroups }}
+      nagios_host_context: {{ nagios_context }}
+      nagios_master: {{ nagios_master }}
+relations:
+  - ["nrpe", "squid:nrpe-external-master"]
diff --git a/lp-webhooks-proxy/golive/collect b/lp-webhooks-proxy/golive/collect
new file mode 100644
index 0000000..94be5ab
--- /dev/null
+++ b/lp-webhooks-proxy/golive/collect
@@ -0,0 +1,4 @@
+# These are unpinned on the assumption that they should always be the latest when golive
+# is added.
+canonical-livepatch	cs:canonical-livepatch
+landscape-client	cs:landscape-client
diff --git a/lp-webhooks-proxy/golive/manifest b/lp-webhooks-proxy/golive/manifest
new file mode 100644
index 0000000..93c9f76
--- /dev/null
+++ b/lp-webhooks-proxy/golive/manifest
@@ -0,0 +1,3 @@
+collect config=golive/collect
+deploy config=golive/services target=golive
+include config=manifests/verify
diff --git a/lp-webhooks-proxy/golive/services b/lp-webhooks-proxy/golive/services
new file mode 100644
index 0000000..ee58749
--- /dev/null
+++ b/lp-webhooks-proxy/golive/services
@@ -0,0 +1,29 @@
+{%- if stage_name == "production" %}
+{%-   set environment_tag = "devops-production" %}
+{%- elif stage_name == "staging" %}
+{%-   set environment_tag = "devops-staging" %}
+{%- else %}
+{%-   set environment_tag = "devops-unknown" %}
+{%- endif -%}
+golive:
+  series: {{ series }}
+  services:
+    squid:
+      charm: squid-reverseproxy
+    landscape:
+      charm: landscape-client
+      options:
+        url: https://landscape.is.canonical.com/message-system
+        ping-url: http://landscape.is.canonical.com/ping
+        account-name: standalone
+        tags: juju-managed, devops-instance, {{environment_tag}}
+        registration-key: include-file://{{local_dir}}/canonical-is-landscape.key
+    canonical-livepatch:
+      charm: canonical-livepatch
+      options:
+        livepatch_key: include-file://{{local_dir}}/canonical-is-livepatch.key
+        livepatch_proxy: http://squid.internal:3128
+        snap_proxy: http://squid.internal:3128
+  relations:
+    - ["canonical-livepatch", "squid"]
+    - ["landscape", "squid"]
diff --git a/lp-webhooks-proxy/manifest b/lp-webhooks-proxy/manifest
new file mode 120000
index 0000000..6893813
--- /dev/null
+++ b/lp-webhooks-proxy/manifest
@@ -0,0 +1 @@
+manifests/ci-run
\ No newline at end of file
diff --git a/lp-webhooks-proxy/manifests/ci-run b/lp-webhooks-proxy/manifests/ci-run
new file mode 100644
index 0000000..5e8353d
--- /dev/null
+++ b/lp-webhooks-proxy/manifests/ci-run
@@ -0,0 +1,2 @@
+include config=manifests/initial-deployment
+include config=manifests/upgrade-deployment
diff --git a/lp-webhooks-proxy/manifests/initial-deployment b/lp-webhooks-proxy/manifests/initial-deployment
new file mode 100644
index 0000000..7652c87
--- /dev/null
+++ b/lp-webhooks-proxy/manifests/initial-deployment
@@ -0,0 +1,3 @@
+bundle config=bundle.yaml delay=0
+juju-check-wait
+include config=manifests/verify
diff --git a/lp-webhooks-proxy/manifests/upgrade-deployment b/lp-webhooks-proxy/manifests/upgrade-deployment
new file mode 100644
index 0000000..0386de0
--- /dev/null
+++ b/lp-webhooks-proxy/manifests/upgrade-deployment
@@ -0,0 +1,4 @@
+include config=manifests/verify
+bundle config=bundle.yaml delay=0
+juju-check-wait
+include config=manifests/verify
diff --git a/lp-webhooks-proxy/manifests/verify b/lp-webhooks-proxy/manifests/verify
new file mode 100644
index 0000000..f25f902
--- /dev/null
+++ b/lp-webhooks-proxy/manifests/verify
@@ -0,0 +1,4 @@
+juju-check-wait
+# It occasionally takes a little while for all the servers to start
+# accepting connections.
+verify retry=3
diff --git a/lp-webhooks-proxy/utils b/lp-webhooks-proxy/utils
new file mode 120000
index 0000000..468ba70
--- /dev/null
+++ b/lp-webhooks-proxy/utils
@@ -0,0 +1 @@
+../utils
\ No newline at end of file
diff --git a/lp-webhooks-proxy/verify b/lp-webhooks-proxy/verify
new file mode 100755
index 0000000..a95e4bf
--- /dev/null
+++ b/lp-webhooks-proxy/verify
@@ -0,0 +1,8 @@
+#! /bin/sh
+set -e
+
+TOP="${0%/*}"
+
+export EXTRA_SKIP_CHECKS="check_squidpeers|check_swap${EXTRA_SKIP_CHECKS:+|${EXTRA_SKIP_CHECKS}}"
+
+exec "$TOP/utils/verify"
diff --git a/mojo-lp-git b/mojo-lp-git
new file mode 120000
index 0000000..c1ccb21
--- /dev/null
+++ b/mojo-lp-git
@@ -0,0 +1 @@
+lp-git
\ No newline at end of file
diff --git a/mojo-lp-signing b/mojo-lp-signing
new file mode 120000
index 0000000..112a1c0
--- /dev/null
+++ b/mojo-lp-signing
@@ -0,0 +1 @@
+lp-signing
\ No newline at end of file
diff --git a/mojo-lp-webhooks-proxy b/mojo-lp-webhooks-proxy
new file mode 120000
index 0000000..8996f69
--- /dev/null
+++ b/mojo-lp-webhooks-proxy
@@ -0,0 +1 @@
+lp-webhooks-proxy
\ No newline at end of file
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/utils/__init__.py
diff --git a/utils/add-floating-ip b/utils/add-floating-ip
new file mode 100755
index 0000000..d879796
--- /dev/null
+++ b/utils/add-floating-ip
@@ -0,0 +1,205 @@
+#!/usr/bin/python3
+#
+# Authors:      Paul Gear, Neale Pickett
+# Description:  Manage floating IP allocations in the Mojo local directory for a Juju service or unit.
+# NOTE:         $MOJO_PROJECT and $MOJO_STAGE must be set before calling this script.
+#
+
+import check_version  # noqa: F401
+
+import json
+import os
+import subprocess
+
+
+SECRETS_DIR = os.path.join(
+    os.environ.get('MOJO_ROOT', '/srv/mojo'), 'LOCAL',
+    os.environ['MOJO_PROJECT'], os.environ['MOJO_STAGE'])
+
+status = json.loads(
+    subprocess.check_output(['juju', 'status', '--format=json']))
+services = status.get("applications")
+if services is None:
+    services = status["services"]
+
+
+_openstack_session = None
+
+
+def get_os_session():
+    from keystoneauth1 import loading, session
+
+    global _openstack_session
+    if _openstack_session is not None:
+        return _openstack_session
+
+    if "OS_PROJECT_NAME" in os.environ:
+        project_name = os.environ["OS_PROJECT_NAME"]
+    else:
+        project_name = os.environ["OS_TENANT_NAME"]
+    loader = loading.get_plugin_loader('password')
+    auth = loader.load_from_options(
+        username=os.environ["OS_USERNAME"],
+        password=os.environ["OS_PASSWORD"],
+        auth_url=os.environ["OS_AUTH_URL"],
+        project_name=project_name,
+        project_domain_name=os.environ["OS_PROJECT_DOMAIN_NAME"],
+        user_domain_name=os.environ["OS_USER_DOMAIN_NAME"],
+        )
+    _openstack_session = session.Session(auth=auth)
+    return _openstack_session
+
+
+def get_nova_client():
+    from novaclient import client as novaclient
+
+    if os.environ.get("OS_IDENTITY_API_VERSION") == "3":
+        auth = {"session": get_os_session()}
+    else:
+        if "OS_PROJECT_NAME" in os.environ:
+            project_name = os.environ["OS_PROJECT_NAME"]
+        else:
+            project_name = os.environ["OS_TENANT_NAME"]
+        auth = {
+            "username": os.environ["OS_USERNAME"],
+            "api_key": os.environ["OS_PASSWORD"],
+            "auth_url": os.environ["OS_AUTH_URL"],
+            "project_id": project_name,
+            "region_name": os.environ["OS_REGION_NAME"],
+            }
+    return novaclient.Client("2", **auth)
+
+
+nova_tenant = get_nova_client()
+
+
+def get_ip_pool():
+    pool = os.environ.get('MOJO_FLOATING_IP_POOL')
+    if pool is not None:
+        return pool
+
+    if os.environ["OS_USERNAME"].startswith(("stg-", "stg_")):
+        return "staging_ext_net"
+    return "ext_net"
+
+
+def units_in_service(service_name):
+    units = services[service_name]["units"]
+    return list(units)
+
+
+def machine_of_unit(unit_name):
+    service_name, _ = unit_name.split('/', 1)
+
+    unit = services[service_name]["units"][unit_name]
+    machine_no = unit["machine"]
+    machine = status["machines"][machine_no]
+
+    return machine
+
+
+def get_unit_floating_ip(unit_name):
+    machine = machine_of_unit(unit_name)
+
+    server = nova_tenant.servers.find(id=machine["instance-id"])
+
+    # If we already have a floating IP associated, use that.
+    try:
+        fip = nova_tenant.floating_ips.find(instance_id=server.id)
+        return fip.ip
+    except Exception:
+        pass
+
+    unitfn = os.path.join(SECRETS_DIR, unit_name.replace('/', '.') + ".ip")
+
+    # Rename older standard
+    oldunitfn = os.path.join(SECRETS_DIR, unit_name.replace('/', '_'))
+    try:
+        os.rename(oldunitfn, unitfn)
+    except Exception:
+        pass
+
+    # Read IP from state file.
+    try:
+        myip = open(unitfn).read().strip()
+    except Exception:
+        myip = None
+
+    # Create a new floating IP to use
+    if not myip:
+        if not os.path.isdir(SECRETS_DIR):
+            os.makedirs(SECRETS_DIR)
+        unitf = open(unitfn, "w")
+        fip = nova_tenant.floating_ips.create(get_ip_pool())
+        myip = fip.ip
+        unitf.write(myip)
+        unitf.close()
+    else:
+        try:
+            # Get the existing one
+            fip = nova_tenant.floating_ips.find(ip=myip)
+        except Exception:
+            # If this happens you're going to need to either get that back in the list,
+            # or blow away the state file so it gets a new IP.
+            raise(RuntimeError("Desired IP {} not in floating ips list!".format(myip)))
+
+    if fip.instance_id:
+        # If it's already associated, ensure it's associated to us
+        machine_id = machine.get('Id')
+        if machine_id is None:
+            machine_id = machine['instance-id']
+        if fip.instance_id != machine_id:
+            raise RuntimeError("IP {} is associated, but not to {}!".format(myip, unit_name))
+        return myip
+
+    # Go associate it now
+    server.add_floating_ip(fip)
+
+    return myip
+
+
+def usage():
+    print("""Usage: {} [SERVICE|UNIT]
+
+# Add a floating IP to the apache2/0 unit:
+add-floating-ip apache2/0
+
+# Add floating IPs to all units of the jenkins-slave service:
+add-floating-ip jenkins-slave
+
+# Add floating IPs to all units in the haproxy and squid services:
+add-floating-ip haproxy squid
+
+# Add floating IPs to the apache2/0 and apache2/1 units:
+export targets="apache2/0 apache2/1"
+add-floating-ip
+""".format("add-floating-ip"))
+
+
+def main():
+    import sys
+
+    if len(sys.argv) >= 2:
+        args = sys.argv[1:]
+    elif 'targets' in os.environ:
+        args = os.environ['targets'].split()
+    else:
+        return usage()
+
+    for arg in args:
+        if "/" not in arg:
+            service_name = arg
+            print("{}:".format(service_name))
+            unit_names = units_in_service(service_name)
+            unit_names.sort()
+            for unit_name in unit_names:
+                ip = get_unit_floating_ip(unit_name)
+                print("- {}: {}".format(unit_name, ip))
+        else:
+            unit_name = arg
+            ip = get_unit_floating_ip(unit_name)
+            print("{}: {}".format(unit_name, ip))
+
+
+if __name__ == '__main__':
+    main()
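The per-unit state files that back these allocations follow a simple
naming scheme; a sketch of the path derivation used above (the values
shown are illustrative):

    # Illustrative only: how a unit name maps to its floating-IP state
    # file under the Mojo local directory, per get_unit_floating_ip().
    import os

    secrets_dir = "/srv/mojo/LOCAL/mojo-lp-signing/devel"  # example value
    unit_name = "haproxy/0"
    unitfn = os.path.join(secrets_dir, unit_name.replace("/", ".") + ".ip")
    assert unitfn == "/srv/mojo/LOCAL/mojo-lp-signing/devel/haproxy.0.ip"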
diff --git a/utils/check_version.py b/utils/check_version.py
new file mode 100644
index 0000000..acf82ea
--- /dev/null
+++ b/utils/check_version.py
@@ -0,0 +1,17 @@
+# At the time of writing (June 2021), we still need to deploy code to PS4.5.
+# The devops management host for that cloud runs Ubuntu 14.04, which has
+# Python 3.4, and which also doesn't have many of the libraries we need
+# packaged for Python 3.  This abominable hack checks whether we're on 3.4,
+# and if so re-execs itself using Python 2.
+#
+# The Python version itself isn't really the important factor here, but it's
+# a proxy indicator that we can check easily.
+#
+# We should drop this as soon as we no longer need to support deploying from
+# Ubuntu 14.04.
+
+import os
+import sys
+
+if sys.version_info[0] == 3 and sys.version_info[1] < 5:
+    os.execv('/usr/bin/python2', [sys.argv[0]] + sys.argv)
diff --git a/utils/custom-secgroups.py b/utils/custom-secgroups.py
new file mode 100755
index 0000000..a3b356b
--- /dev/null
+++ b/utils/custom-secgroups.py
@@ -0,0 +1,527 @@
+#! /usr/bin/python3
+# By wgrant, with some neutron manipulation stolen from cjwatson's
+# clean-up-secgroups.py.
+
+from __future__ import print_function
+
+import check_version  # noqa: F401
+
+__metaclass__ = type
+
+import argparse
+import base64
+import collections
+import functools
+import itertools
+import os.path
+import subprocess
+import sys
+import tempfile
+
+import netaddr
+import yaml
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+
+@functools.total_ordering
+class Rule:
+    _keys = [
+        "direction", "ethertype", "notrack", "protocol",
+        "port_range_min", "port_range_max", "remote_ip_prefix",
+        ]
+
+    def __init__(self, direction="ingress", family=None, protocol=None,
+                 port=None, cidr=None, notrack=False):
+        self.direction = direction
+        self.ethertype = family
+        self.protocol = protocol
+        self.notrack = notrack
+        if port is not None:
+            self.port_range_min = self.port_range_max = port
+        elif protocol in ("tcp", "udp"):
+            self.port_range_min = 1
+            self.port_range_max = 65535
+        else:
+            self.port_range_min = self.port_range_max = None
+        self.remote_ip_prefix = cidr
+
+    @classmethod
+    def from_neutron(cls, neutron_rule):
+        rule = Rule()
+        for key in cls._keys:
+            if key != 'notrack':
+                setattr(rule, key, neutron_rule[key])
+        rule.neutron_id = neutron_rule["id"]
+        return rule
+
+    def to_neutron(self):
+        return {
+            key: getattr(self, key) for key in self._keys if key != "notrack"}
+
+    def to_iptables(self, chain):
+        iptables_cmd = "ip6tables" if self.ethertype == "IPv6" else "iptables"
+        command = [iptables_cmd, "-A", chain]
+        if self.notrack:
+            command_notrack = [iptables_cmd, "-t", "raw", "-I", "PREROUTING"]
+        if (self.remote_ip_prefix is not None and
+                not self.remote_ip_prefix.endswith("/0")):
+            command.extend(["-s", self.remote_ip_prefix])
+            if self.notrack:
+                command_notrack.extend(["-s", self.remote_ip_prefix])
+        if self.ethertype == "IPv6" and self.protocol == "icmp":
+            protocol = "icmpv6"
+        else:
+            protocol = self.protocol
+        if self.protocol is not None:
+            command.extend(["-p", protocol])
+            if self.notrack:
+                command_notrack.extend(["-p", protocol])
+            if self.port_range_min is not None:
+                if self.port_range_min == self.port_range_max:
+                    port_range = str(self.port_range_min)
+                else:
+                    port_range = "{}:{}".format(
+                        self.port_range_min, self.port_range_max)
+                command.extend(["--dport", port_range])
+                if self.notrack:
+                    command_notrack.extend(["--dport", port_range])
+        command.extend(["-j", "ACCEPT"])
+        commands = [" ".join(shlex_quote(arg) for arg in command)]
+        if self.notrack:
+            command_notrack.extend(["-j", "NOTRACK"])
+            commands.append(
+                " ".join(shlex_quote(arg) for arg in command_notrack))
+        return "\n".join(commands)
+
+    @property
+    def _value_tuple(self):
+        return tuple(getattr(self, key) for key in self._keys)
+
+    def __eq__(self, other):
+        return (
+            self.__class__ == other.__class__
+            and self._value_tuple == other._value_tuple)
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __lt__(self, other):
+        return self._value_tuple < other._value_tuple
+
+    def __hash__(self):
+        return hash(tuple(getattr(self, key) for key in self._keys))
+
+    def __str__(self):
+        desc = "{} {} {}".format(self.direction, self.ethertype, self.protocol)
+        if self.remote_ip_prefix is not None:
+            desc += ", for {}".format(self.remote_ip_prefix)
+        else:
+            desc += ", for any host"
+        if self.port_range_min is not None:
+            desc += ", ports {}-{}".format(
+                self.port_range_min, self.port_range_max)
+        return desc
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, str(self))
+
+
+def get_juju_version():
+    return subprocess.check_output(
+        ["juju", "version"], universal_newlines=True).rstrip("\n")
+
+
+def get_juju_uuid():
+    [model] = yaml.safe_load(subprocess.check_output(
+        ["juju", "show-model"], universal_newlines=True)).values()
+    return "{}-{}".format(model["controller-uuid"], model["model-uuid"])
+
+
+def get_juju_uuid_except_not():
+    [model] = yaml.safe_load(subprocess.check_output(
+        ["juju", "show-model"], universal_newlines=True)).values()
+    # Juju 2.2 rudely includes the "OWNER/" prefix. 2.1 does not.
+    model_name = model["name"].rsplit('/', 1)[-1]
+    return "{}-{}".format(model["model-uuid"][-6:], model_name)
+
+
+def get_juju_status():
+    return yaml.safe_load(
+        subprocess.check_output(["juju", "status", "--format=yaml"]))
+
+
+def find_machines_for_service(status, service):
+    application = status['applications'][service]
+    if not application.get('subordinate-to', []):
+        return [
+            (unit['machine'], status['machines'][unit['machine']])
+            for unit in application.get('units', {}).values()]
+    else:
+        return [
+            (unit['machine'], status['machines'][unit['machine']])
+            for unit in itertools.chain(*(
+                app.get('units', {}).values()
+                for app in status['applications'].values()))
+            if any(sub.startswith(service + '/')
+                   for sub in unit.get('subordinates', []))]
+
+
+def add_rule(neutron, secgroup, rule):
+    neutron_rule = rule.to_neutron()
+    neutron_rule["security_group_id"] = secgroup["id"]
+    if rule.remote_ip_prefix is None:
+        neutron_rule["remote_group_id"] = secgroup["id"]
+    neutron.create_security_group_rule(
+        body={"security_group_rule": neutron_rule})
+
+
+_openstack_session = None
+
+
+def get_os_session():
+    from keystoneauth1 import loading, session
+
+    global _openstack_session
+    if _openstack_session is not None:
+        return _openstack_session
+
+    if "OS_PROJECT_NAME" in os.environ:
+        project_name = os.environ["OS_PROJECT_NAME"]
+    else:
+        project_name = os.environ["OS_TENANT_NAME"]
+    loader = loading.get_plugin_loader('password')
+    auth = loader.load_from_options(
+        username=os.environ["OS_USERNAME"],
+        password=os.environ["OS_PASSWORD"],
+        auth_url=os.environ["OS_AUTH_URL"],
+        project_name=project_name,
+        project_domain_name=os.environ["OS_PROJECT_DOMAIN_NAME"],
+        user_domain_name=os.environ["OS_USER_DOMAIN_NAME"],
+        )
+    _openstack_session = session.Session(auth=auth)
+    return _openstack_session
+
+
+def get_nova_client():
+    from novaclient import client as novaclient
+
+    if os.environ.get("OS_IDENTITY_API_VERSION") == "3":
+        auth = {"session": get_os_session()}
+    else:
+        if "OS_PROJECT_NAME" in os.environ:
+            project_name = os.environ["OS_PROJECT_NAME"]
+        else:
+            project_name = os.environ["OS_TENANT_NAME"]
+        auth = {
+            "username": os.environ["OS_USERNAME"],
+            "api_key": os.environ["OS_PASSWORD"],
+            "auth_url": os.environ["OS_AUTH_URL"],
+            "project_id": project_name,
+            "region_name": os.environ["OS_REGION_NAME"],
+            }
+    return novaclient.Client("2", **auth)
+
+
+def get_neutron_client():
+    from neutronclient.v2_0 import client as neutronclient
+
+    if os.environ.get("OS_IDENTITY_API_VERSION") == "3":
+        auth = {"session": get_os_session()}
+    else:
+        if "OS_PROJECT_NAME" in os.environ:
+            project_name = os.environ["OS_PROJECT_NAME"]
+        else:
+            project_name = os.environ["OS_TENANT_NAME"]
+        auth = {
+            "username": os.environ["OS_USERNAME"],
+            "password": os.environ["OS_PASSWORD"],
+            "auth_url": os.environ["OS_AUTH_URL"],
+            "tenant_name": project_name,
+            "region_name": os.environ["OS_REGION_NAME"],
+            }
+    return neutronclient.Client(**auth)
+
+
+def configure_secgroup_rules(args, secgroups):
+    neutron = get_neutron_client()
+
+    existing_groups = {}
+    for sg in neutron.list_security_groups()["security_groups"]:
+        if sg["name"] in existing_groups:
+            raise AssertionError(
+                'Duplicate security group "%s".' % sg["name"])
+        existing_groups[sg["name"]] = sg
+
+    for name, rules in sorted(secgroups.items()):
+        if name in existing_groups:
+            secgroup = existing_groups[name]
+        else:
+            print('Creating security group "%s".' % name)
+            if not args.dry_run:
+                secgroup = neutron.create_security_group(
+                    {"security_group": {"name": name}})["security_group"]
+            else:
+                secgroup = {"name": name, "security_group_rules": []}
+        existing_rules = set(
+            Rule.from_neutron(rule)
+            for rule in secgroup["security_group_rules"])
+        desired_rules = set(Rule(**rule) for rule in rules)
+        for rule in desired_rules:
+            if rule.protocol and not rule.ethertype:
+                raise AssertionError(
+                    "Rule %s cannot specify protocol without family." % rule)
+        to_add = desired_rules - existing_rules
+        to_remove = existing_rules - desired_rules
+        if to_add:
+            print('Adding rules to security group "%s": %s'
+                  % (name, ', '.join(sorted(repr(r) for r in to_add))))
+            if not args.dry_run:
+                for rule in to_add:
+                    add_rule(neutron, secgroup, rule)
+        if to_remove:
+            print('Removing rules from security group "%s": %s'
+                  % (name, ', '.join(sorted(repr(r) for r in to_remove))))
+            if not args.dry_run:
+                for rule in to_remove:
+                    neutron.delete_security_group_rule(rule.neutron_id)
+
+
+def configure_instance_secgroups(args, instances):
+    nova = get_nova_client()
+
+    juju_secgroup_prefix = 'juju-%s' % get_juju_uuid()
+    juju_instance_prefix = 'juju-%s' % get_juju_uuid_except_not()
+
+    servers = nova.servers.list()
+
+    # Map instance IDs to names for logging friendliness.
+    server_names = {server.id: server.name for server in servers}
+    server_ids = {server.name: server.id for server in servers}
+    instances = {
+        server_names[instance]: secgroups
+        for instance, secgroups in instances.items()}
+
+    # Find non-Juju security groups on Juju instances.
+    existing_instances = {
+        server.name: set(
+            sg["name"] for sg in server.security_groups
+            if sg["name"] != juju_secgroup_prefix
+                and not sg["name"].startswith(juju_secgroup_prefix + '-'))
+        for server in servers
+        if server.name.startswith(juju_instance_prefix + '-')}
+
+    for name in sorted(set(instances.keys()) | set(existing_instances.keys())):
+        desired_secgroups = instances.get(name, set())
+        existing_secgroups = existing_instances.get(name, set())
+        to_add = desired_secgroups - existing_secgroups
+        to_remove = existing_secgroups - desired_secgroups
+        if to_add:
+            print('Adding security groups to instance "%s": %s'
+                  % (name, ', '.join(sorted(to_add))))
+            if not args.dry_run:
+                for secgroup in to_add:
+                    nova.servers.add_security_group(server_ids[name], secgroup)
+        if to_remove:
+            print('Removing security groups from instance "%s": %s'
+                  % (name, ', '.join(sorted(to_remove))))
+            if not args.dry_run:
+                for secgroup in to_remove:
+                    nova.servers.remove_security_group(
+                        server_ids[name], secgroup)
+
+
+def juju_install(local_path, machine, remote_path, mode=0o644):
+    """Install a file on a remote machine.
+
+    Marshal the file's contents via base64 so that it can be transferred
+    using "juju run".  ("juju scp" is problematic here because firewall
+    rules might affect SSH.)
+    """
+    with open(local_path, "rb") as local_file:
+        data = local_file.read()
+    shell_command = "printf %%s %s | base64 -d >%s && chmod %o %s" % (
+        shlex_quote(base64.b64encode(data).decode("utf-8")),
+        shlex_quote(remote_path), mode, shlex_quote(remote_path))
+    subprocess.check_call(
+        ["juju", "run", "--quiet", "--machine", machine, "--",
+         "sudo", "sh", "-c", shell_command])
+
+
+def configure_firewall_rules(args, juju_status, config, service_name):
+    import jinja2
+
+    top_dir = os.path.dirname(__file__)
+    templates_dir = os.path.join(top_dir, "templates")
+    template_env = jinja2.Environment(
+        loader=jinja2.FileSystemLoader(templates_dir))
+    template = template_env.get_template("firewall.sh.j2")
+
+    # This is hideous.  OpenStack security groups are implemented at the
+    # border of the environment, but when using iptables we use per-unit
+    # firewalls instead, which means that we need to know which networks
+    # are local to this environment so that we can keep things open within
+    # the environment.
+    environment_rules = []
+    for cidr in args.local_networks:
+        if netaddr.IPNetwork(cidr).version == 6:
+            iptables_family = "IPv6"
+        else:
+            iptables_family = "IPv4"
+        rule = Rule(family=iptables_family, cidr=cidr)
+        environment_rules.append(rule.to_iptables("INPUT"))
+
+    chains = {}
+    for name, rules in sorted(config["rules"].items()):
+        commands = [Rule(**rule).to_iptables(name) for rule in rules]
+        chains[name] = commands
+
+    instance_chains = collections.defaultdict(set)
+    for service, properties in config["applications"].items():
+        if properties.get("type", "neutron") == "iptables":
+            for machine_id, _ in find_machines_for_service(
+                    juju_status, service):
+                instance_chains[machine_id].update(properties["rules"])
+    for machine_id in juju_status["machines"]:
+        if machine_id in instance_chains:
+            instance_chains[machine_id].update(config.get("all-units", []))
+
+    for machine, status in sorted(juju_status["machines"].items()):
+        if machine not in instance_chains:
+            continue
+        context = {
+            "environment_rules": environment_rules,
+            "chains": [
+                (name, chains[name])
+                for name in sorted(instance_chains[machine])],
+            }
+        firewall_sh_text = template.render(context)
+        if args.dry_run:
+            print("Host firewall for machine {}:".format(machine))
+            print(firewall_sh_text)
+            print()
+        else:
+            with tempfile.NamedTemporaryFile(mode="w+") as firewall_file:
+                firewall_file.write(template.render(context))
+                firewall_file.flush()
+                juju_install(
+                    firewall_file.name, machine, "/etc/network/firewall.sh",
+                    mode=0o755)
+            juju_install(
+                 os.path.join(top_dir, "files", "lp-firewall.service"),
+                 machine,
+                 "/etc/systemd/system/{}.service".format(service_name))
+            subprocess.check_call(
+                ["juju", "run", "--quiet", "--machine", machine, "--",
+                 "sudo", "systemctl", "enable",
+                 "{}.service".format(service_name)])
+            subprocess.check_call(
+                ["juju", "run", "--quiet", "--machine", machine, "--",
+                 "sudo", "systemctl", "start",
+                 "{}.service".format(service_name)])
+
+
+def get_config(args):
+    config = {"all-units": [], "applications": {}, "rules": {}}
+
+    stage = os.path.basename(os.environ.get('MOJO_STAGE', ''))
+    spec_subdir = os.path.dirname(os.environ.get('MOJO_STAGE', ''))
+    if args.skip_stages:
+        if stage in args.skip_stages.split(','):
+            print('Skipping secgroups setup')
+            sys.exit(0)
+
+    if args.config_path:
+        with open(args.config_path, 'rt') as f:
+            args_config = yaml.safe_load(f.read())
+            config['all-units'].extend(args_config.get('all-units', []))
+            config['applications'].update(args_config['applications'])
+            config['rules'].update(args_config['rules'])
+
+    mojo_stage_config_path = '{}/{}/configs/custom-secgroups-{}.yaml'.format(
+        os.environ['MOJO_SPEC_DIR'], spec_subdir, stage)
+    mojo_default_config_path = (
+        '{}/configs/canonical-is-secgroups-{}.yaml'.format(
+            os.environ['MOJO_SPEC_DIR'], stage))
+    try:
+        with open(mojo_stage_config_path, 'rt') as f:
+            mojo_stage_config = yaml.safe_load(f.read())
+            config['all-units'].extend(mojo_stage_config.get('all-units', []))
+            config['applications'].update(mojo_stage_config['applications'])
+            config['rules'].update(mojo_stage_config['rules'])
+    except IOError:
+        print('Skipping implicit mojo stage secgroups - no config file found')
+    try:
+        with open(mojo_default_config_path, 'rt') as f:
+            mojo_default_config = yaml.safe_load(f.read())
+            config['all-units'].extend(
+                mojo_default_config.get('all-units', []))
+            config['applications'].update(mojo_default_config['applications'])
+            config['rules'].update(mojo_default_config['rules'])
+    except IOError:
+        print('Skipping implicit default mojo secgroups - '
+              'no config file found')
+
+    return config
+
+
+def get_instance_secgroups(config, juju_status):
+    """For each machine, list its needed secgroups. """
+    instance_secgroups = collections.defaultdict(set)
+    for app in juju_status['applications']:
+        properties = config['applications'].get(app)
+        if properties is not None:
+            if properties.get("type", "neutron") != "neutron":
+                continue
+            for _, machine in find_machines_for_service(juju_status, app):
+                instance_secgroups[machine["instance-id"]].update(
+                    properties["rules"])
+        for _, machine in find_machines_for_service(juju_status, app):
+            instance_secgroups[machine["instance-id"]].update(
+                config["all-units"])
+    return instance_secgroups
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Manage custom security groups on a Juju model.")
+    parser.add_argument(
+        "--config-path",
+        help=(
+            "config file to load in addition to Mojo stage and default "
+            "configs"))
+    parser.add_argument(
+        "--dry-run", action="store_true",
+        default=bool(os.environ.get("DRY_RUN", False)))
+    parser.add_argument("--skip-stages", default=os.environ.get("SKIP_STAGES"))
+    parser.add_argument(
+        "--local-networks", default=os.environ.get("LOCAL_NETWORKS", ""),
+        help=(
+            "space-separated CIDR addresses of networks to consider as local "
+            "to this environment for the purpose of host firewalls"))
+    args = parser.parse_args()
+    args.local_networks = args.local_networks.split()
+
+    spec_subdir = os.path.dirname(os.environ.get('MOJO_STAGE', ''))
+
+    config = get_config(args)
+    juju_status = get_juju_status()
+
+    short_model_name = os.path.basename(spec_subdir)
+    if short_model_name.startswith('mojo-'):
+        short_model_name = short_model_name[len('mojo-'):]
+    service_name = '{}-firewall'.format(short_model_name)
+    configure_firewall_rules(args, juju_status, config, service_name)
+
+    if any(properties.get("type", "neutron") == "neutron"
+           for properties in config["applications"].values()):
+        instance_secgroups = get_instance_secgroups(config, juju_status)
+        configure_secgroup_rules(args, config["rules"])
+        configure_instance_secgroups(args, instance_secgroups)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
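As a concrete illustration of the Rule class above, a typical ingress
rule renders to a single iptables command.  The output below was traced
from to_iptables() by hand, so treat it as a sketch rather than captured
output; it also assumes Rule is in scope (the script is not packaged as
a module, so in practice you would paste this at the bottom of the file):

    # Traces Rule.to_iptables() for one representative rule.
    rule = Rule(family="IPv4", protocol="tcp", port=22, cidr="10.0.0.0/8")
    print(rule.to_iptables("bastions-ssh"))
    # iptables -A bastions-ssh -s 10.0.0.0/8 -p tcp --dport 22 -j ACCEPT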
diff --git a/utils/files/lp-firewall.service b/utils/files/lp-firewall.service
new file mode 100644
index 0000000..f58b093
--- /dev/null
+++ b/utils/files/lp-firewall.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Launchpad host firewall
+DefaultDependencies=no
+Before=network.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/etc/network/firewall.sh break-everything
+
+[Install]
+WantedBy=multi-user.target
diff --git a/utils/get-last-build-label b/utils/get-last-build-label
new file mode 100755
index 0000000..a12e797
--- /dev/null
+++ b/utils/get-last-build-label
@@ -0,0 +1,30 @@
+#! /usr/bin/python3
+
+"""Print the last build label in a particular state.
+
+The combined build label is turned into a form slightly easier to handle in
+shell.
+"""
+
+from __future__ import print_function
+
+import check_version  # noqa: F401
+
+import os
+
+import utils
+
+
+container_name = os.environ.get(
+    'MOJO_DOWNLOADER_CONTAINER_NAME', os.environ['MOJO_PROJECT'] + '-builds')
+
+combined_build_label = os.environ.get('BUILD_LABEL', '')
+if not combined_build_label:
+    combined_build_label = utils.get_last_build_label(
+        container_name=container_name,
+        build_type=os.environ['ATTEMPTED_OR_SUCCESSFUL'],
+        anonymous=('MOJO_DOWNLOADER_ANONYMOUS' in os.environ))
+build_labels = utils.split_build_label(combined_build_label)
+
+for payload_name, payload_label in build_labels.items():
+    print("{}={}".format(payload_name, payload_label))
diff --git a/utils/get-swift-storage-url b/utils/get-swift-storage-url
new file mode 100755
index 0000000..e34a597
--- /dev/null
+++ b/utils/get-swift-storage-url
@@ -0,0 +1,12 @@
+#! /usr/bin/python3
+
+"""Print the Swift storage URL."""
+
+from __future__ import print_function
+
+import check_version  # noqa: F401
+
+import utils
+
+
+print(utils.get_swift_auth(anonymous=True)[0])
diff --git a/utils/make-branches b/utils/make-branches
new file mode 100755
index 0000000..c34bcb5
--- /dev/null
+++ b/utils/make-branches
@@ -0,0 +1,37 @@
+#! /usr/bin/python3
+
+import check_version  # noqa: F401
+
+import os
+import shutil
+import subprocess
+import sys
+
+import utils
+
+
+combined_build_label = os.environ.get('BUILD_LABEL')
+branch = os.environ.get('BRANCH', 'tip')
+build_dir = os.environ['MOJO_BUILD_DIR']
+
+build_labels = utils.split_build_label(combined_build_label)
+if branch in ("qastaging", "staging"):
+    for payload_name in utils.payload_names():
+        code_dir = "{}/{}-code-".format(build_dir, payload_name)
+
+        if payload_name in build_labels:
+            revid = build_labels[payload_name]
+        else:
+            revid = subprocess.check_output(
+                ["git", "rev-parse", "HEAD"], cwd=code_dir + "tip",
+                universal_newlines=True).rstrip("\n")
+
+        branch_dir = code_dir + branch
+        if os.path.exists(branch_dir):
+            shutil.rmtree(branch_dir)
+
+        sys.stdout.write("Branching {} at {}\n".format(revid, branch_dir))
+        subprocess.check_call(['git', 'clone', code_dir + 'tip', branch_dir])
+        subprocess.check_call(['git', 'checkout', revid], cwd=branch_dir)
+else:
+    sys.exit("Error: valid values for BRANCH: qastaging, staging")
diff --git a/utils/publish-build-assets b/utils/publish-build-assets
new file mode 100755
index 0000000..11ddd18
--- /dev/null
+++ b/utils/publish-build-assets
@@ -0,0 +1,24 @@
+#! /usr/bin/python3
+
+import check_version  # noqa: F401
+
+import os
+
+import utils
+
+
+code_container_name = os.environ.get(
+    'MOJO_PUBLISHER_CONTAINER_NAME', os.environ['MOJO_PROJECT'] + '-builds')
+
+for payload_name in utils.payload_names():
+    overwrite = 'FORCE_REBUILD' in os.environ
+
+    tarball_path = utils.get_tarball_path(
+        project_name=payload_name,
+        tarball_file_name=payload_name + '.tar.gz')
+
+    utils.ensure_container_privs(code_container_name, world_readable=True)
+
+    utils.publish_to_swift(
+        tarball_path, code_container_name, overwrite=overwrite,
+        world_readable=True)
diff --git a/utils/publish-last-build-label b/utils/publish-last-build-label
new file mode 100755
index 0000000..0ff812b
--- /dev/null
+++ b/utils/publish-last-build-label
@@ -0,0 +1,34 @@
+#! /usr/bin/python3
+
+"""Publishes the last attempted build label to a specific swift file
+for use by other jobs which may trigger a spec run."""
+
+import check_version  # noqa: F401
+
+import os
+import shutil
+import tempfile
+
+import utils
+
+
+container_name = os.environ.get(
+    'MOJO_PUBLISHER_CONTAINER_NAME', os.environ['MOJO_PROJECT'] + '-builds')
+labels = []
+
+for payload_name in utils.payload_names():
+    revid = utils.get_code_branch_revision(payload_name)
+    labels.append("{}={}".format(payload_name, revid))
+
+tmpdir = tempfile.mkdtemp()
+
+filename = "last-{}-build-label.txt".format(
+    os.environ["ATTEMPTED_OR_SUCCESSFUL"])
+
+with open(os.path.join(tmpdir, filename), 'w+') as f:
+    f.write(":".join(labels))
+
+utils.publish_file_to_swift(
+    filename, container_name, cwd=tmpdir, world_readable=True)
+
+shutil.rmtree(tmpdir)
diff --git a/utils/set-local-config b/utils/set-local-config
new file mode 100755
index 0000000..f5e5845
--- /dev/null
+++ b/utils/set-local-config
@@ -0,0 +1,46 @@
+#! /usr/bin/python3
+
+from __future__ import print_function
+
+import check_version  # noqa: F401
+
+import argparse
+import os.path
+
+import yaml
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "--bundle", default=False, action="store_true",
+    help="Use bundle output format.")
+parser.add_argument(
+    "--default", default=False, action="store_true",
+    help="Only set options if they are not already set.")
+parser.add_argument("stack", metavar="STACK")
+parser.add_argument("service", metavar="SERVICE")
+parser.add_argument("options", metavar="KEY=VALUE", nargs="*")
+args = parser.parse_args()
+
+local_config_path = os.path.join(
+    os.environ['MOJO_LOCAL_DIR'], 'deploy-secrets')
+
+print("Updating local config at {}".format(local_config_path))
+
+if os.path.exists(local_config_path):
+    with open(local_config_path) as config_file:
+        config = yaml.safe_load(config_file)
+else:
+    config = {}
+if args.bundle:
+    applications = config.setdefault('applications', {})
+else:
+    applications = config.setdefault(args.stack, {}).setdefault('services', {})
+config_options = applications.setdefault(args.service, {}).setdefault(
+    'options', {})
+for option in args.options:
+    key, value = option.split('=', 1)
+    if not args.default or key not in config_options:
+        config_options[key] = value
+with open(local_config_path, 'w') as config_file:
+    yaml.dump(config, config_file, indent=4, default_flow_style=False)
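For example, a non-bundle invocation such as "set-local-config
lp-signing lp-signing build_label=abc123" (arguments hypothetical)
leaves deploy-secrets shaped like this before it is dumped to YAML:

    # Hypothetical result of one non-bundle run; mirrors the setdefault
    # chain above.
    config = {
        "lp-signing": {                 # STACK
            "services": {
                "lp-signing": {         # SERVICE
                    "options": {
                        "build_label": "abc123",  # KEY=VALUE
                    },
                },
            },
        },
    }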
diff --git a/utils/set-service-options b/utils/set-service-options
new file mode 100755
index 0000000..8b12678
--- /dev/null
+++ b/utils/set-service-options
@@ -0,0 +1,123 @@
+#! /usr/bin/python3
+
+from __future__ import print_function
+
+import check_version  # noqa: F401
+
+import base64
+import numbers
+import sys
+import yaml
+
+import utils
+
+
+def get_options(options):
+    for name, value in options.items():
+        if value is None:
+            value = ''
+        if isinstance(value, (numbers.Integral, float, bool)):
+            value = str(value).lower()
+        elif isinstance(value, bytes) and bytes != str:
+            value = value.decode("UTF-8")
+        elif value.startswith("include-file:"):
+            # @ is juju-set lingo for include value from file path
+            value = value.replace('include-file://', '@')
+        elif value.startswith("include-base64:"):
+            file_path = value[15:]
+            with open(file_path, "rb") as b:
+                value = base64.b64encode(b.read()).decode("UTF-8")
+
+        yield (name, value)
+
+
+def update_config(svc_name, options):
+    options_list = ["{}={}".format(k, v) for k, v in options.items()]
+    output = utils.run(
+        None, ['juju', 'config', svc_name] + options_list,
+        echo=False, return_stderr=True)
+
+    # juju-set only gives us output if a key is *already* set to a value,
+    # so we first check for this key in any possible WARNINGs
+    options_updated = ['"{}"'.format(o) for o in options.keys()
+                       if o not in output]
+    if options_updated:
+        print("{} options updated: {}".format(
+            svc_name, ', '.join(options_updated)))
+    # This is the text from the warning for an already set value
+    # so any other output is an error
+    elif 'already has the value' not in output.strip():
+        print(output, file=sys.stderr)
+
+
+def update_constraints(svc_name, constraints):
+    current_constraints = utils.run(
+        None,
+        ['juju', 'get-constraints', svc_name],
+        echo=False,
+    ).strip()
+
+    if constraints != current_constraints:
+        cmd = ['juju', 'set-constraints']
+        cmd.extend([svc_name, constraints])
+        output = utils.run(
+            None, cmd, echo=False, return_stderr=True).strip()
+        if not output:
+            print('{} constraints updated to: {}'.format(
+                svc_name, constraints))
+        else:
+            print(output, file=sys.stderr)
+
+
+def update_service(svc_name, service):
+    if not service.get('options'):
+        print("Skipping service {} for it has no options defined.".format(
+            svc_name))
+        return
+
+    print("Checking service {} for changes".format(svc_name))
+    options = dict(get_options(service['options']))
+    config = yaml.safe_load(utils.run(
+        None,
+        ['juju', 'config', svc_name, '--format=yaml'],
+        echo=False))
+    settings = config['settings']
+    actual_options = {}
+    for name, value in options.items():
+        if name in settings:
+            actual_options[name] = value
+        else:
+            print('Skipping setting option {} as service {} '
+                  'does not support it'.format(name, svc_name))
+    update_config(svc_name, actual_options)
+    if 'constraints' in service:
+        update_constraints(svc_name, service['constraints'])
+
+
+def main():
+    deploy_config = utils.mojo_deploy_show()
+    services = deploy_config['services']
+    switch_services = deploy_config.get('templates', {}).get('services', {})
+    juju_services = utils.juju_services()
+    for service_name, service in services.items():
+        if service_name not in juju_services:
+            print(
+                "Skipping service {} as is not deployed.".format(service_name))
+        else:
+            update_service(service_name, service)
+
+    for service_name, service in switch_services.items():
+        updated = False
+        for name in juju_services:
+            if name.startswith(service_name):
+                if name.split('-r')[0] == service_name:
+                    update_service(name, service)
+                    updated = True
+
+        if not updated:
+            print("Skipping switch service {} as is not deployed.".format(
+                service_name))
+
+
+if __name__ == '__main__':
+    main()
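The get_options() generator above normalises option values before they
reach "juju config"; a few representative transformations, assuming
get_options() is in scope (the include-file path is illustrative):

    # Representative get_options() transformations, per the code above.
    raw = {
        "wsgi_logrotate_count": 90,             # numbers become strings
        "enable_monitoring": True,              # booleans are lowercased
        "api_auth": "include-file:///srv/key",  # @-prefix for juju config
    }
    expected = {
        "wsgi_logrotate_count": "90",
        "enable_monitoring": "true",
        "api_auth": "@/srv/key",
    }
    assert dict(get_options(raw)) == expected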
diff --git a/utils/templates/firewall.sh.j2 b/utils/templates/firewall.sh.j2
new file mode 100755
index 0000000..0624b9f
--- /dev/null
+++ b/utils/templates/firewall.sh.j2
@@ -0,0 +1,64 @@
+#! /bin/sh
+
+# This file is managed by a Mojo script.  Do not edit the generated version.
+
+set -e
+
+# "break-everything" == manual, forced run (not recommended)
+# "/etc/network/firewall.sh" == being run by the check_firewall Nagios check
+case $1 in
+    break-everything|/etc/network/firewall.sh)
+        ;;
+    *)
+        cat >&2 <<EOF
+**** DO NOT RUN THIS FILE BY HAND ****
+  This script will flush all non-primary chains and should only be run when
+  restarting networking or on reboot.  If in doubt, please ask developers.
+**** DO NOT RUN THIS FILE BY HAND ****
+EOF
+        exit 1
+        ;;
+esac
+
+ip46tables () {
+    iptables "$@"
+    ip6tables "$@"
+}
+
+logger -t firewall "Starting run of $(basename "$0")"
+
+# For LXD containers, this module must be inserted on the host instead.
+modprobe nf_conntrack || true
+
+# Flush all rules and chains.
+ip46tables -F
+ip46tables -F -t raw
+ip46tables -X
+
+# Allow localhost traffic and responses from outbound connections.
+ip46tables -A INPUT -i lo -j ACCEPT
+ip46tables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
+{%- if environment_rules %}
+
+# Allow anything from the local environment.
+{%- for rule in environment_rules %}
+{{ rule }}
+{%- endfor %}
+{%- endif %}
+
+# Set default policy.
+ip46tables -P INPUT DROP
+ip46tables -P FORWARD DROP
+ip46tables -P OUTPUT ACCEPT
+{%- if chains %}
+
+# Service rules.
+{%- for chain, rules in chains %}
+ip46tables -N {{ chain }}
+ip46tables -A INPUT -j {{ chain }}
+{%- for rule in rules %}
+{{ rule }}
+{%- endfor %}
+{%- endfor %}
+{%- endif %}
+
diff --git a/utils/utils.py b/utils/utils.py
new file mode 100644
index 0000000..724196d
--- /dev/null
+++ b/utils/utils.py
@@ -0,0 +1,356 @@
+import inspect
+import json
+import os
+try:
+    from shlex import quote as shell_quote
+except ImportError:
+    from pipes import quote as shell_quote
+import subprocess
+import sys
+
+import requests
+import yaml
+
+
+if sys.version_info[0] >= 3:
+    __popen_args = set(inspect.signature(subprocess.Popen.__init__).parameters)
+else:
+    __popen_args = set(inspect.getargspec(subprocess.Popen.__init__).args)
+__popen_args -= set(['self'])
+__reserved_popen_args = set(['args', 'stdout', 'stderr'])
+__popen_args -= __reserved_popen_args
+
+
+def run(secrets, cmd, **kwargs):
+    """User friendly alternative to subprocess.check_output
+
+     - exception includes unsplit command, for easy copy/paste
+     - secrets are masked in all output
+     - command is echoed by default, echo=False disables
+     - stdout/err are echoed on failure, echo=False disables
+
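+    Illustrative example ('hunter2' stands in for a real secret):
+
+        run(['hunter2'], ['mycmd', '--password', 'hunter2'])
+
+    echoes "running: mycmd --password *******" before executing.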
+    """
+    echo = kwargs.pop('echo', True)
+    reserved_args = [k for k in __reserved_popen_args if k in kwargs]
+    if reserved_args:
+        raise ValueError(
+            '{} popen arguments not allowed, they will be overridden'.format(
+                ', '.join(reserved_args)))
+    cmd_str = " ".join(shell_quote(arg) for arg in cmd)
+    if secrets:
+        for secret in secrets:
+            cmd_str = cmd_str.replace(secret, '*' * len(secret))
+    if echo:
+        print('running: ' + cmd_str)
+    popen_args = dict((k, v) for k, v in kwargs.items() if k in __popen_args)
+    process = subprocess.Popen(
+        args=cmd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        universal_newlines=True,
+        **popen_args)
+    stdout, stderr = process.communicate()
+    retcode = process.poll()
+    if retcode:
+        if echo:
+            print(' retcode: ' + str(retcode))
+            print(' stdout:  ' + stdout)
+            print(' stderr:  ' + stderr)
+            if 'cwd' in kwargs:
+                print(' cwd:     ' + kwargs['cwd'])
+        raise subprocess.CalledProcessError(retcode, cmd_str, output=stdout)
+
+    return stdout
+
+
+def mojo_run(*args, **kwargs):
+    """Run commands from the spec dir"""
+    kwargs['cwd'] = os.environ['MOJO_SPEC_DIR']
+    return run(*args, **kwargs)
+
+
+def juju_status(service_name=''):
+    cmd = ['juju', 'status', '--format', 'json']
+    if service_name:
+        cmd.append(service_name)
+    juju_status = run(None, cmd)
+    return json.loads(juju_status)
+
+
+def juju_services(service_name='', *args, **kwargs):
+    status = juju_status(service_name, *args, **kwargs)
+    return status['applications']
+
+
+def get_units_for_service(service_name):
+    """Return a list of all units in a service"""
+    status = juju_status(service_name)
+    units = status['applications'][service_name]['units']
+    return list(units.keys())
+
+
+def get_first_unit_for_service(service_name):
+    """Return the name of the first available unit for a service."""
+    return get_units_for_service(service_name)[0]
+
+
+def get_publisher_creds():
+    creds = {}
+    for suffix in (
+            'AUTH_URL', 'USERNAME', 'PASSWORD', 'REGION_NAME',
+            'USER_DOMAIN_NAME', 'PROJECT_DOMAIN_NAME', 'PROJECT_NAME',
+            'TENANT_NAME'):
+        publisher_name = 'MOJO_PUBLISHER_' + suffix
+        os_name = 'OS_' + suffix
+        if publisher_name in os.environ:
+            creds[os_name] = os.environ[publisher_name]
+        elif os_name in os.environ:
+            creds[os_name] = os.environ[os_name]
+    # Smooth the transition between v2 and v3 authentication.
+    if 'OS_TENANT_NAME' not in creds and 'OS_PROJECT_NAME' in creds:
+        creds['OS_TENANT_NAME'] = creds['OS_PROJECT_NAME']
+    elif 'OS_PROJECT_NAME' not in creds and 'OS_TENANT_NAME' in creds:
+        creds['OS_PROJECT_NAME'] = creds['OS_TENANT_NAME']
+    return creds
+
+
+def get_downloader_creds():
+    creds = {}
+    for suffix in (
+            'AUTH_URL', 'USERNAME', 'PASSWORD', 'REGION_NAME',
+            'USER_DOMAIN_NAME', 'PROJECT_DOMAIN_NAME', 'PROJECT_NAME',
+            'TENANT_NAME'):
+        downloader_name = 'MOJO_DOWNLOADER_' + suffix
+        os_name = 'OS_' + suffix
+        if downloader_name in os.environ:
+            creds[os_name] = os.environ[downloader_name]
+        elif os_name in os.environ:
+            creds[os_name] = os.environ[os_name]
+    # Smooth the transition between v2 and v3 authentication.
+    if 'OS_TENANT_NAME' not in creds and 'OS_PROJECT_NAME' in creds:
+        creds['OS_TENANT_NAME'] = creds['OS_PROJECT_NAME']
+    elif 'OS_PROJECT_NAME' not in creds and 'OS_TENANT_NAME' in creds:
+        creds['OS_PROJECT_NAME'] = creds['OS_TENANT_NAME']
+    return creds
+
+
+def swift_run(cmd, *args, **kwargs):
+    """Helper for running swift using the MOJO_PUBLISHER creds."""
+    publisher_creds = get_publisher_creds()
+    secrets = [publisher_creds['OS_PASSWORD']]
+    swift_cmd = [
+        'swift',
+        '--os-auth-url', publisher_creds['OS_AUTH_URL'],
+        '--os-username', publisher_creds['OS_USERNAME'],
+        '--os-password', publisher_creds['OS_PASSWORD'],
+        '--os-tenant-name', publisher_creds['OS_TENANT_NAME'],
+        '--os-region-name', publisher_creds['OS_REGION_NAME'],
+        ]
+    prefixed_cmd = swift_cmd + cmd
+    return run(secrets, prefixed_cmd, *args, **kwargs)
+
+
+def ensure_container_privs(container_name, world_readable=False):
+    """Ensure that the container exists and has suitable permissions.
+
+    It will be writeable by MOJO_PUBLISHER_USERNAME.  If world_readable is
+    True (suitable for the case where the code being published is public
+    anyway and contains no secrets), then the container will be
+    world-readable; otherwise, it will be readable by
+    MOJO_DOWNLOADER_USERNAME.
+
+    This allows us to give services suitable credentials for getting the
+    built code from a container.
+    """
+    if world_readable:
+        swift_run(['post', container_name, '--read-acl', '.r:*'])
+    else:
+        downloader_creds = get_downloader_creds()
+        downloader = (downloader_creds['OS_TENANT_NAME'] + ':' +
+                      downloader_creds['OS_USERNAME'])
+        swift_run(['post', container_name, '--read-acl', downloader])
+
+
+def split_tarball_path(tarball_path):
+    """Splits /my/big/path/to/tarball/project/r25/foo.tgz
+
+    Returns a tuple of:
+        ('/my/big/path/to/tarball', 'project/r25/foo.tgz')
+    """
+    slash_pos = tarball_path.rfind('/')
+    slash_pos = tarball_path.rfind('/', 0, slash_pos)
+    slash_pos = tarball_path.rfind('/', 0, slash_pos)
+    return tarball_path[0:slash_pos], tarball_path[slash_pos + 1:]
+
+
+def publish_file_to_swift(file_path, container_name, cwd=None,
+                          overwrite=True, world_readable=False):
+    """Publish a file to the container owned by MOJO_PUBLISHER."""
+    already_published = False
+    # swift 1.0 returns an error code if the file doesn't exist, but
+    # later versions don't.
+    try:
+        stats = swift_run(['stat', container_name, file_path])
+        if file_path in stats:
+            already_published = True
+    except subprocess.CalledProcessError:
+        pass
+
+    if already_published:
+        print("File {} already published to {}.".format(
+            file_path, container_name))
+        if not overwrite:
+            return
+
+    print("Publishing {} to {} container.".format(file_path, container_name))
+    try:
+        swift_run(['upload', container_name, file_path], cwd=cwd)
+    except subprocess.CalledProcessError:
+        sys.exit("Failed to upload {} to swift container {}".format(
+            file_path, container_name))
+
+    storage_url = get_swift_auth(anonymous=world_readable)[0]
+    print("Published file: {}/{}/{}".format(
+        storage_url, container_name, file_path))
+
+
+def publish_to_swift(tarball_path, container_name, overwrite=True,
+                     world_readable=False):
+    """Publish the tarball to the container.
+
+    The tarball is published including the path of its direct
+    parent. For example, the tarball_path of "/srv/foo/bar/r20/myproject.tgz"
+    is published as "bar/r20/myproject.tgz".
+    """
+    tarball_builds_dir, tarball_name = split_tarball_path(tarball_path)
+    publish_file_to_swift(
+        tarball_name, container_name,
+        cwd=tarball_builds_dir, overwrite=overwrite,
+        world_readable=world_readable)
+
+
+def get_code_branch_revision(project_name, branch=None):
+    if branch is None:
+        branch = os.environ['BRANCH']
+    code_dir = "{MOJO_BUILD_DIR}/{project_name}-code-{branch}"
+    code_dir = code_dir.format(
+        project_name=project_name,
+        MOJO_BUILD_DIR=os.environ['MOJO_BUILD_DIR'],
+        branch=branch)
+
+    return subprocess.check_output(
+        ['git', 'rev-parse', 'HEAD'], cwd=code_dir,
+        universal_newlines=True).rstrip('\n')
+
+
+def get_tarball_path(project_name, tarball_file_name):
+    """Return the expected standard path to the tarball for a project.
+
+    Assumes that the env BRANCH identifies either the tip or previous
+    branch in the build dir.
+    """
+    tarball_builds_dir = "{MOJO_BUILD_DIR}/{project_name}-builds"
+    tarball_builds_dir = tarball_builds_dir.format(
+        MOJO_BUILD_DIR=os.environ['MOJO_BUILD_DIR'],
+        project_name=project_name)
+
+    build_label = get_code_branch_revision(project_name)
+    current_tarball = "{}/{}".format(build_label, tarball_file_name)
+
+    return os.path.join(tarball_builds_dir, current_tarball)
+
+
+def get_swift_auth(anonymous=False):
+    if anonymous:
+        if 'MOJO_DOWNLOADER_STORAGE_URL' in os.environ:
+            return os.environ['MOJO_DOWNLOADER_STORAGE_URL'], None
+        elif 'OS_STORAGE_URL' in os.environ:
+            return os.environ['OS_STORAGE_URL'], None
+        else:
+            # We only use these credentials to figure out the storage URL;
+            # after that it should be possible to fetch files anonymously.
+            creds = get_publisher_creds()
+    else:
+        creds = get_downloader_creds()
+
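+    # Deferred import, presumably so that callers which never touch swift
+    # don't need python-swiftclient installed.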
+    import swiftclient
+
+    url, token = swiftclient.get_auth(
+        creds['OS_AUTH_URL'], creds['OS_USERNAME'], creds['OS_PASSWORD'],
+        auth_version=os.environ.get('OS_AUTH_VERSION', '2.0'),
+        os_options={
+            'tenant_name': creds['OS_TENANT_NAME'],
+            'user_domain_name': creds.get('OS_USER_DOMAIN_NAME'),
+            'project_domain_name': creds.get('OS_PROJECT_DOMAIN_NAME'),
+            'project_name': creds['OS_PROJECT_NAME'],
+            })
+    if anonymous:
+        token = None
+    return url, token
+
+
+def get_last_build_label(container_name, build_type="successful",
+                         anonymous=False):
+    headers = {}
+    url, token = get_swift_auth(anonymous=anonymous)
+    if token is not None:
+        headers['X-Auth-Token'] = token
+
+    object_uri = (
+        '{}/{}/last-{}-build-label.txt'.format(
+            url, container_name, build_type))
+    response = requests.get(object_uri, headers=headers)
+
+    if not response.ok:
+        return None
+
+    return response.text.strip()
+
+
+def get_revision_id_from_uri(uri, revision_header):
+    """Return the current revision of a payload."""
+    response = requests.head(uri)
+    return response.headers[revision_header]
+
+
+def mojo_deploy_show(config=None, target=None, local=None):
+    if config is None:
+        config = os.environ.get('CONFIG', 'services')
+    if target is None:
+        target = os.environ.get('TARGET')
+    mojo_options = ['--options']
+    if config is not None:
+        mojo_options.append('config={}'.format(config))
+    if target is not None:
+        mojo_options.append('target={}'.format(target))
+    if local is None:
+        local = os.environ.get('LOCAL')
+    if local is not None:
+        if not os.path.exists(local):
+            local = os.path.join(os.environ.get('MOJO_LOCAL_DIR', ''), local)
+        if os.path.exists(local):
+            mojo_options.append('local={}'.format(local))
+
+    config_yaml = mojo_run(None, ['mojo', 'deploy-show'] + mojo_options)
+    return yaml.safe_load(config_yaml)
+
+
+def payload_names():
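+    """List payload names from '<name>-code-tip' dirs in MOJO_BUILD_DIR."""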
+    payload_names = []
+    for name in sorted(os.listdir(os.environ['MOJO_BUILD_DIR'])):
+        if name.endswith('-code-tip'):
+            payload_name = name[:-len('-code-tip')]
+            if (payload_name == 'turnipcake' and
+                    os.path.basename(os.environ['MOJO_STAGE']) != 'devel'):
+                continue
+            payload_names.append(payload_name)
+    return payload_names
+
+
+def split_build_label(combined_build_label):
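+    """Split a combined build label into a dict of per-payload labels.
+
+    Illustrative example (format inferred from the parsing below):
+    'launchpad=r123:turnipcake=r456' ->
+    {'launchpad': 'r123', 'turnipcake': 'r456'}
+    """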
+    build_labels = {}
+    if combined_build_label is not None:
+        for build_label in combined_build_label.split(':'):
+            payload_name, payload_label = build_label.split('=', 1)
+            build_labels[payload_name] = payload_label
+    return build_labels
diff --git a/utils/verify b/utils/verify
new file mode 100755
index 0000000..7ad05d9
--- /dev/null
+++ b/utils/verify
@@ -0,0 +1,61 @@
+#! /bin/bash
+
+set -eu
+
+# If we have any etc bzr nagios checks, we need to wait up to 15 minutes
+# for the cron to run to populate the check file, so just ignore those
+SKIP_CHECKS="check_etc_bzr|check_ksplice"
+# The load takes a few minutes to settle after a deployment.
+SKIP_CHECKS="${SKIP_CHECKS}|check_load"
+# Log archive won't happen for a day after deployment.
+SKIP_CHECKS="${SKIP_CHECKS}|check_log_archive_status"
+# check_users check is not correctly defined
+SKIP_CHECKS="${SKIP_CHECKS}|check_users"
+# add checks to be excluded from the command line
+for exclude in "$@"; do
+    SKIP_CHECKS="${SKIP_CHECKS}|$exclude"
+done
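+# e.g. running "./verify check_swap check_conntrack" would additionally skip
+# those two checks (illustrative check names)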
+# add checks to be excluded from the EXTRA_SKIP_CHECKS environment variable
+# as set in a manifest with: verify config=verify-nrpe EXTRA_SKIP_CHECKS="foo|bar"
+EXTRA_SKIP_CHECKS=${EXTRA_SKIP_CHECKS:-}
+if [ -n "$EXTRA_SKIP_CHECKS" ]
+then
+    SKIP_CHECKS="${SKIP_CHECKS}|${EXTRA_SKIP_CHECKS}"
+fi
+echo "Skipping the following nagios checks: ${SKIP_CHECKS}"
+
+check() {
+    juju ssh $1 "egrep -h '^command' /etc/nagios/nrpe.d/check_* |\
+        grep -Ev '${SKIP_CHECKS}' | cut -d'=' -f2- | sed 's/.*/(set -x; &) || \
+        echo MOJO_NAGIOS_FAIL /'|sudo -u nagios -s bash" 2>/dev/null
+}
+
+# This will only find services that have 'nrpe' somewhere in their name and
+# consist of lower-case alphanumerics and dashes.
+# WILL match:
+#  nrpe, u1-nrpe, u1-nrpe-app, nrpe-app
+NRPE_UNITS=$(juju status --format yaml | sed -rn 's/^ *([-a-z0-9]*nrpe[-a-z0-9]*\/[0-9]+):$/\1/p')
+NAGIOS_OUTPUT=$(
+    for unit in $NRPE_UNITS; do
+        check $unit | sed -e "s#^#$unit: #" -e 's/PASS [^ ]* /PASS **redacted** /'
+    done
+)
+
+echo "${NAGIOS_OUTPUT}"
+
+NAGIOS_FAIL=$(echo "${NAGIOS_OUTPUT}" | grep MOJO_NAGIOS_FAIL) || true
+
+if [ -n "${NAGIOS_FAIL}" ]; then
+    echo "########################"
+    echo "# Nagios Checks Failed #"
+    echo "########################"
+    exit 1
+else
+    echo "########################"
+    echo "# Nagios Checks Passed #"
+    echo "########################"
+fi
+
+echo "#########################"
+echo "# Successfully verified #"
+echo "#########################"
diff --git a/utils/wait-service-status b/utils/wait-service-status
new file mode 100755
index 0000000..35ab2b1
--- /dev/null
+++ b/utils/wait-service-status
@@ -0,0 +1,43 @@
+#! /usr/bin/python3
+
+from argparse import ArgumentParser
+import json
+import subprocess
+import sys
+import time
+
+
+def main():
+    parser = ArgumentParser(
+        description='Wait for a service to reach a given status.')
+    parser.add_argument(
+        '--service',
+        help='systemd service name (defaults to application name)')
+    parser.add_argument('application', help='Juju application name')
+    parser.add_argument('status', help='systemd status to wait for')
+    args = parser.parse_args()
+    if not args.service:
+        args.service = args.application
+
+    start_time = time.time()
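+    # Poll once per second, giving up after 120 seconds.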
+    while True:
+        results = json.loads(subprocess.check_output(
+            ['juju', 'run', '--format=json', '--application', args.application,
+             '--', 'systemctl', 'is-active', args.service],
+            universal_newlines=True))
+        if all(unit['Stdout'].strip() == args.status for unit in results):
+            break
+        if time.time() >= start_time + 120:
+            print(
+                'Service {service} on {application} did not reach status '
+                '{status} after 120 seconds'.format(
+                    service=args.service, application=args.application,
+                    status=args.status),
+                file=sys.stderr)
+            print(results)
+            sys.exit(1)
+        time.sleep(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/vbuilder/bundle.yaml b/vbuilder/bundle.yaml
new file mode 100644
index 0000000..d6d0784
--- /dev/null
+++ b/vbuilder/bundle.yaml
@@ -0,0 +1,662 @@
+{%- set log_hosts_allow = "carob.canonical.com launchpad-bastion-ps5.internal" %}
+
+{%- if stage_name == "production" %}
+{%-   set clamav_database_url = "http://clamav-database-mirror.lp.internal/" %}
+{%-   set content_id_template = "launchpad-buildd:production" %}
+{%-   set dns_update_host_bos01 = "10.189.0.2" %}
+{%-   set dns_update_host_bos02 = "10.189.128.2" %}
+{%-   set dns_update_host_bos03 = "10.189.128.2" %}
+{%-   set dns_update_host_lcy02 = "10.131.53.11 10.131.53.12 10.131.53.13" %}
+{%-   set dns_update_key_name = "vbuilder-manage" %}
+{%-   set domain_bos01 = "vbuilder.bos01.scalingstack" %}
+{%-   set domain_bos02 = "vbuilder.bos02.scalingstack" %}
+{%-   set domain_bos03 = "vbuilder.bos03.scalingstack" %}
+{%-   set domain_lcy02 = "vbuilder.lcy02.scalingstack" %}
+{%-   set extra_constraints = "root-disk-source=volume" %}
+{%-   set gss_series = "focal|jammy" %}
+{%-   set instance_key_name_bos01 = "vbuilder-manage-bos01" %}
+{%-   set instance_key_name_bos02 = "vbuilder-manage-bos02" %}
+{%-   set instance_key_name_bos03 = "vbuilder-manage-bos03" %}
+{%-   set instance_key_name_lcy02 = "vbuilder-manage-lcy02" %}
+{%-   set instance_network_bos01 = "10.189.16.0/20" %}
+{%-   set instance_network_bos02 = "10.189.144.0/20" %}
+{%-   set instance_network_bos03 = "10.143.0.0/20" %}
+{%-   set instance_network_lcy02 = "10.133.0.0/16" %}
+{%-   set instance_router = "vbuilder_router" %}
+{%-   set instance_router_bos03 = "router_launchpad-vbuilder-production" %}
+{%-   set instance_router_lcy02 = "router_launchpad-vbuilder-production" %}
+{#-   Output of "openstack image show -c id -f value qemu-riscv64-uboot". #}
+{%-   set kernel_id_bos03_riscv64 = "c5015509-91c3-4684-80cc-b8a2ef44c8b5" %}
+{%-   set launchpad_buildd_repository = "ppa:launchpad/buildd" %}
+{%-   set lp_buildd_managers = "10.131.66.156 10.131.215.202" %}
+{%-   set lp_environment = "production" %}
+{%-   set lp_sshkey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQD3tBvyEaZFX8H4krfXGhczq9q/GCfcE0kpsjK8WzM/lQOhWQAVSmm8O9gposKvu4mT28hBWI746+NOPLBrf9ax7YRoU+ZuZesyIIK6ztN07G4aK2vt+1W9yNZKgTJZ8FvxHwlbFy6wMjMP3OzuxWyh0yi03z1YiGPJt0riJRZT+QecaoyYhkTuRbcCoWbhmM0veUjCvfR7LA43YbOfmts7STNCtl1IUJErmUY6fSR1LkyF9uJSdwozAVht242vI8Yg+PrOnKCx3X5w546okP2uMDVXKWeY2g/MhUSr+ZyIaS2JxUygZ7FKS4gNLYb3O4Q6tcIwPE++zsr9HueTfjw2LyeQTZQDQjuv0kJ40B3IFF4E9FMzu8MDwArOKUGzISLrX5VtDZBKINDclfDbrFWHUlVBC2CIIjGPKj3afluP9jadWjsOQx0ooBO0tb0Se+7t3oNjKlknwD85AYCPzIlZt7wo/+e/V/Tilw+UUf00JioEceTdxuOHmbuxP5RzCleg8pYLfe5jiHfBJi0DhO61IYlFMgSToh78EShHWYEdSbD+Ve5GWKvPEU9CsSTleSSqBFdhyggfB5fHiup0efAqMnstDn5sxGYFzfFu7SNVHkMuFeiRHZK+9fF/nfzk0UK5EaGs+4IMhWS1ns8m5O5Li609c/nXK5t5fBOkiWa+RQ== lp_buildd@juju-4112d9-prod-launchpad-manual-servers-4" %}
+{%-   set modifiers_bos01 = '{"arm64": "10.43.0.29", "ppc64el": "10.43.0.36", "s390x": "10.43.0.39"}' %}
+{%-   set modifiers_bos02 = '{"arm64": "10.44.0.22", "ppc64el": "10.44.0.20", "s390x": "10.44.0.18"}' %}
+{%-   set modifiers_bos03 = '{"arm64": "10.143.254.129", "riscv64": "10.143.254.223"}' %}
+{%-   set name_prefix = "launchpad-buildd" %}
+{%-   set openstack_tenant_name = "vbuilder_project" %}
+{%-   set openstack_tenant_name_bos03 = "launchpad-vbuilder-production_project" %}
+{%-   set openstack_tenant_name_lcy02 = "launchpad-vbuilder-production_project" %}
+{%-   set openstack_username = "vbuilder" %}
+{%-   set openstack_username_bos03 = "launchpad-vbuilder-production" %}
+{%-   set openstack_username_lcy02 = "launchpad-vbuilder-production" %}
+{%-   set vbuilders_bos01 = {"amd64": {"series": "focal", "flavor": "vbuilder-gpu", "count": 5}, "arm64": {"series": "focal", "count": 40, "config_drive": false}, "arm64-gpu": {"arch_base": "arm64", "arch_suffix": "-gpu", "series": "focal", "flavor": "vbuilder-nvidia-l4", "count": 2, "config_drive": false}, "ppc64el": {"series": "focal", "count": 20}, "s390x": {"series": "focal", "count": 20}} %}
+{%-   set vbuilders_bos02 = {"arm64": {"series": "focal", "count": 80, "config_drive": false}, "ppc64el": {"series": "focal", "count": 30}, "s390x": {"series": "focal", "count": 20}} %}
+{%-   set vbuilders_bos03 = {"amd64": {"series": "focal", "count": 60}, "arm64": {"series": "focal", "count": 40, "config_drive": false, "flavor": "vbuilder-arm64"}, "riscv64": {"series": "jammy", "count": 120, "config_drive": false}} %}
+{%-   set vbuilders_lcy02 = {"amd64": {"series": "focal", "count": 120}} %}
+{%-   set vbuilder_prefix = "" %}
+{%- elif stage_name == "staging" %}
+{#-   This environment is confusingly named, and is actually connected to Launchpad dogfood. #}
+{%-   set clamav_database_url = "http://clamav-database-mirror.staging.lp.internal/" %}
+{%-   set content_id_template = "launchpad-buildd:staging" %}
+{%-   set dns_update_host_bos01 = "10.189.0.2" %}
+{%-   set dns_update_host_bos02 = "10.189.128.2" %}
+{%-   set dns_update_host_bos03 = "10.189.128.2" %}
+{%-   set dns_update_host_lcy02 = "10.132.31.11 10.132.31.12 10.132.31.13" %}
+{%-   set dns_update_key_name = "vbuilder-staging-manage" %}
+{%-   set domain_bos01 = "vbuilder.staging.bos01.scalingstack" %}
+{%-   set domain_bos02 = "vbuilder.staging.bos02.scalingstack" %}
+{%-   set domain_bos03 = "vbuilder.staging.bos03.scalingstack" %}
+{%-   set domain_lcy02 = "vbuilder.staging.lcy02.scalingstack" %}
+{%-   set extra_constraints = "" %}
+{%-   set gss_series = "focal|jammy" %}
+{%-   set instance_key_name_bos01 = "ppa-manage-test" %}
+{%-   set instance_key_name_bos02 = "ppa-manage-test" %}
+{%-   set instance_key_name_bos03 = "ppa-manage-test" %}
+{%-   set instance_key_name_lcy02 = "ppa-manage-test" %}
+{%-   set instance_network_bos01 = "10.189.34.0/24" %}
+{%-   set instance_network_bos02 = "10.189.162.0/23" %}
+{%-   set instance_network_bos03 = "10.144.2.0/23" %}
+{%-   set instance_network_lcy02 = "10.134.2.0/23" %}
+{%-   set instance_router = "vbuilder_staging_router" %}
+{%-   set instance_router_bos03 = "router_launchpad-vbuilder-staging" %}
+{%-   set instance_router_lcy02 = "router_launchpad-vbuilder-staging" %}
+{%-   set launchpad_buildd_repository = "ppa:launchpad/buildd-staging" %}
+{%-   set lp_buildd_managers = "91.189.90.132" %}
+{%-   set lp_environment = "dogfood" %}
+{%-   set lp_sshkey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDB3Qw9541hRDiGVLGxZm/afNsCirlM6Wa2/VkdU22KQfVu579+ek8fdgvR/si7UOTDgE4j7DGuJW+pk7z6T08Iy5feaI3JpnZV7PX0Qp8CilrcTNWPL1eBoq5HcYDzl+zyXT341l7GBFhwYQ50sF3hq0RV0XvUwxfheyBtdmzkiVE1LXT7kdFvXtxe0fR9ypw+NuMRqqFyZ9w3tee7zclDw1cCcnDf6vmIXbYLF9yNZOQQhwYQFFgIUepdkUg2onyhYXWKj8mooFVGne0WPVTJ5Sz805soh9SuUGGgpTh70EtgpJ1nxSWGtIWUtNc6mSGdZzGtgVTnbDk04J4FrVX3Bu8yetlQbNPPYuxdqaZP1anoKmgtCIhfe+xCkim5YLc+WZXVRGvk6apCLXMnj9ZhRE7fCKQO/F+aNPCONv0gUVncxuWAyiqdRuilqSA7VTEMYTv7pIYSNOjpD5eMIX2wGkYTjEXopGJouUH2nOXlhsGgssmMepSVJhOJKY1Cfq0ND4ydoDd2Mz1Yj+Us9HToqJU6DD1sAIKOV05fBqVsJEJbctI2vpRY/R1nCBySpM4KpzgkCQWwjkjR8h2/nuwDtyMsJe/BdBuDyRwJGnBLNgBUg+tnWl9yePz/ZXVGrAI7gPuc9DIiuKEvEmiO3o9yfRRjsFMWGIk2y/Hfa01V7w== launchpad@labbu" %}
+{%-   set modifiers_bos01 = '{"arm64": "10.43.0.10", "ppc64el": "10.43.0.23", "s390x": "10.43.0.15"}' %}
+{%-   set modifiers_bos02 = '{"arm64": "10.44.0.13", "ppc64el": "10.44.0.19", "s390x": "10.44.0.14"}' %}
+{%-   set name_prefix = "launchpad-buildd-staging" %}
+{%-   set openstack_tenant_name = "vbuilder_staging_project" %}
+{%-   set openstack_tenant_name_bos03 = "launchpad-vbuilder-staging_project" %}
+{%-   set openstack_tenant_name_lcy02 = "launchpad-vbuilder-staging_project" %}
+{%-   set openstack_username = "vbuilder_staging" %}
+{%-   set openstack_username_bos03 = "launchpad-vbuilder-staging" %}
+{%-   set openstack_username_lcy02 = "launchpad-vbuilder-staging" %}
+{%-   set vbuilders_bos01 = {"amd64": {"series": "jammy", "flavor": "vbuilder-gpu", "count": 1}, "arm64": {"series": "jammy", "count": 1, "config_drive": false}, "arm64-gpu": {"arch_base": "arm64", "arch_suffix": "-gpu", "series": "jammy", "flavor": "vbuilder-nvidia-l4", "count": 1, "config_drive": false}, "ppc64el": {"series": "jammy", "count": 1}, "s390x": {"series": "jammy", "count": 1}} %}
+{%-   set vbuilders_bos02 = {"arm64": {"series": "jammy", "count": 1, "config_drive": false}, "ppc64el": {"series": "jammy", "count": 1}, "s390x": {"series": "jammy", "count": 1}} %}
+{%-   set vbuilders_bos03 = {"amd64": {"series": "jammy", "count": 4}} %}
+{%-   set vbuilders_lcy02 = {"amd64": {"series": "jammy", "count": 4}} %}
+{%-   set vbuilder_prefix = "dogfood-" %}
+{%- elif stage_name == "qastaging" %}
+{%-   set clamav_database_url = "http://clamav-database-mirror.staging.lp.internal/" %}
+{%-   set content_id_template = "launchpad-buildd:qastaging" %}
+{%-   set dns_update_host_bos01 = "10.189.0.2" %}
+{%-   set dns_update_host_bos02 = "10.189.128.2" %}
+{%-   set dns_update_host_bos03 = "10.189.128.2" %}
+{%-   set dns_update_host_lcy02 = "10.132.31.11 10.132.31.12 10.132.31.13" %}
+{%-   set dns_update_key_name = "vbuilder-staging-manage" %}
+{%-   set domain_bos01 = "vbuilder.qastaging.bos01.scalingstack" %}
+{%-   set domain_bos02 = "vbuilder.qastaging.bos02.scalingstack" %}
+{%-   set domain_bos03 = "vbuilder.qastaging.bos03.scalingstack" %}
+{%-   set domain_lcy02 = "vbuilder.qastaging.lcy02.scalingstack" %}
+{%-   set extra_constraints = "" %}
+{%-   set gss_series = "focal|jammy" %}
+{%-   set instance_key_name_bos01 = "ppa-manage-test-qastaging" %}
+{%-   set instance_key_name_bos02 = "ppa-manage-test-qastaging" %}
+{%-   set instance_key_name_bos03 = "ppa-manage-test-qastaging" %}
+{%-   set instance_key_name_lcy02 = "ppa-manage-test-qastaging" %}
+{%-   set instance_network_bos01 = "10.189.36.0/24" %}
+{%-   set instance_network_bos02 = "10.189.164.0/23" %}
+{%-   set instance_network_bos03 = "10.144.4.0/23" %}
+{%-   set instance_network_lcy02 = "10.134.4.0/23" %}
+{%-   set instance_router = "vbuilder_staging_router" %}
+{%-   set instance_router_bos03 = "router_launchpad-vbuilder-staging" %}
+{%-   set instance_router_lcy02 = "router_launchpad-vbuilder-staging" %}
+{#-   Output of "openstack image show -c id -f value qemu-riscv64-uboot". #}
+{%-   set kernel_id_bos03_riscv64 = "bcbb013b-9424-4f61-9fda-6374d29d3ee0" %}
+{%-   set launchpad_buildd_repository = "ppa:launchpad/buildd-staging" %}
+{%-   set lp_buildd_managers = "10.132.54.242" %}
+{%-   set lp_environment = "qastaging" %}
+{%-   set lp_sshkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFrjt0yytzrK9fQuG+6VgE6QStUbDmunlN7+Lv5XhmoL stg-launchpad@launchpad-bastion-ps5" %}
+{%-   set modifiers_bos01 = '{"arm64": "10.43.0.10", "ppc64el": "10.43.0.23", "s390x": "10.43.0.15"}' %}
+{%-   set modifiers_bos02 = '{"arm64": "10.44.0.13", "ppc64el": "10.44.0.19", "s390x": "10.44.0.14"}' %}
+{%-   set modifiers_bos03 = '{"amd64": "10.144.0.206", "arm64": "10.144.0.127", "riscv64": "10.144.0.114"}' %}
+{%-   set name_prefix = "launchpad-buildd-qastaging" %}
+{%-   set openstack_tenant_name = "vbuilder_staging_project" %}
+{%-   set openstack_tenant_name_bos03 = "launchpad-vbuilder-staging_project" %}
+{%-   set openstack_tenant_name_lcy02 = "launchpad-vbuilder-staging_project" %}
+{%-   set openstack_username = "vbuilder_staging" %}
+{%-   set openstack_username_bos03 = "launchpad-vbuilder-staging" %}
+{%-   set openstack_username_lcy02 = "launchpad-vbuilder-staging" %}
+{%-   set vbuilders_bos01 = {"amd64": {"series": "focal", "flavor": "vbuilder-gpu", "count": 1}, "arm64": {"series": "focal", "count": 1, "config_drive": false}, "arm64-gpu": {"arch_base": "arm64", "arch_suffix": "-gpu", "series": "focal", "flavor": "vbuilder-nvidia-l4", "count": 1, "config_drive": false}, "ppc64el": {"series": "focal", "count": 1}, "s390x": {"series": "focal", "count": 1}} %}
+{%-   set vbuilders_bos02 = {"arm64": {"series": "focal", "count": 1, "config_drive": false}, "ppc64el": {"series": "focal", "count": 1}, "s390x": {"series": "focal", "count": 1}} %}
+{%-   set vbuilders_bos03 = {"amd64": {"series": "focal", "count": 4}, "amd64-gpu": {"arch_base": "amd64", "arch_suffix": "-gpu", "series": "focal", "flavor": "vbuilder-gpu", "count": 1}, "arm64": {"series": "focal", "count": 1, "config_drive": false, "flavor": "vbuilder-arm64"}, "riscv64": {"series": "jammy", "count": 1, "config_drive": false}} %}
+{%-   set vbuilders_lcy02 = {"amd64": {"series": "focal", "count": 4}} %}
+{%-   set vbuilder_prefix = "qastaging-" %}
+{%- endif %}
+
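+{#- Illustrative: vbuilder_hostnames("qastaging-bos03-amd64", 2) renders
+    ["qastaging-bos03-amd64-001", "qastaging-bos03-amd64-002"]. #}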
+{%- macro vbuilder_hostnames(prefix, count) %}
+{%-   set hostname_sep = joiner(", ") -%}
+[{% for i in range(1, count + 1) -%}
+{{ hostname_sep() }}"{{ prefix }}-{{ "%03d" % i }}"
+{%- endfor %}]
+{%- endmacro %}
+
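+{#- The vbuilders macro renders one JSON-ish entry per architecture
+    (image_name_prefix, instance_flavor, hostnames, config_drive), fed by the
+    per-stage vbuilders_* dicts above. #}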
+{%- macro vbuilders(region, arches) %}
+{%-   set arch_sep = joiner(", ") -%}
+[{% for arch, properties in arches|dictsort -%}
+{{ arch_sep() }}{"image_name_prefix": "{{ name_prefix }}{{ properties.get('arch_suffix', '') }}/ubuntu-{{ properties['series'] }}-daily-{{ properties.get('arch_base', arch) }}-", "instance_flavor": "{{ properties.get('flavor', 'vbuilder') }}", "hostnames": {{ vbuilder_hostnames("%s%s-%s" % (vbuilder_prefix, region, arch), properties['count']) }}, "config_drive": {{ properties.get('config_drive', True)|tojson }}}
+{%- endfor %}]
+{%- endmacro -%}
+
+series: "{{ series }}"
+applications:
+  rabbitmq-server:
+    charm: cs:rabbitmq-server
+    constraints: "cores=2 mem=8G root-disk=20G {{ extra_constraints }}"
+    num_units: 2
+  glance-simplestreams-sync-bos01-amd64:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(x86_64|amd64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "http://keystone.infra.bos01.scalingstack:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos01
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos01-arm64:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: 'hypervisor_type=kvm hw_firmware_type=uefi'
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 2, item_filters: ["release~({{ gss_series }})", "arch~(arm64|aarch64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "http://keystone.infra.bos01.scalingstack:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos01
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos01-arm64-gpu:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}:gpu"
+      custom_properties: 'hypervisor_type=kvm hw_firmware_type=uefi'
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 2, item_filters: ["release~({{ gss_series }})", "arch~(arm64|aarch64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}-gpu/"
+      openstack-auth-url: "http://keystone.infra.bos01.scalingstack:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos01
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos01-ppc64el:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: 'hypervisor_type=kvm'
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 2, item_filters: ["release~({{ gss_series }})", "arch~(ppc64el)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "http://keystone.infra.bos01.scalingstack:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos01
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos01-s390x:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: 'hypervisor_type=kvm'
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(s390x)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "http://keystone.infra.bos01.scalingstack:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos01
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos02-arm64:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: 'hypervisor_type=kvm hw_firmware_type=uefi'
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 2, item_filters: ["release~({{ gss_series }})", "arch~(arm64|aarch64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "http://keystone.infra.bos02.scalingstack:5000/v2.0";
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos02
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos02-ppc64el:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: 'hypervisor_type=kvm'
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 2, item_filters: ["release~({{ gss_series }})", "arch~(ppc64el)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "http://keystone.infra.bos02.scalingstack:5000/v2.0";
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos02
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos02-s390x:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: 'hypervisor_type=kvm'
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(s390x)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "http://keystone.infra.bos02.scalingstack:5000/v2.0";
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      region: scalingstack-bos02
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos03-amd64:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(x86_64|amd64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "https://keystone.ps6.canonical.com:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name_bos03 }}"
+      openstack-username: "{{ openstack_username_bos03 }}"
+      region: scalingstack-bos03
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos03-amd64-gpu:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}:gpu"
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(x86_64|amd64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}-gpu/"
+      openstack-auth-url: "https://keystone.ps6.canonical.com:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name_bos03 }}"
+      openstack-username: "{{ openstack_username_bos03 }}"
+      region: scalingstack-bos03
+      use_swift: false
+      visibility: private
+  glance-simplestreams-sync-bos03-arm64:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: "hypervisor_type=kvm hw_firmware_type=uefi"
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(arm64|aarch64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "https://keystone.ps6.canonical.com:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name_bos03 }}"
+      openstack-username: "{{ openstack_username_bos03 }}"
+      region: scalingstack-bos03
+      use_swift: false
+      visibility: private
+{%- if stage_name in ("production", "qastaging") %}
+  glance-simplestreams-sync-bos03-riscv64:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      custom_properties: "hw_emulation_architecture=riscv64 hw_machine_type=virt kernel_id={{ kernel_id_bos03_riscv64 }}"
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(riscv64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "https://keystone.ps6.canonical.com:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name_bos03 }}"
+      openstack-username: "{{ openstack_username_bos03 }}"
+      region: scalingstack-bos03
+      use_swift: false
+      visibility: private
+{%- endif %}
+  glance-simplestreams-sync-lcy02-amd64:
+    charm: {{ charm_dir }}/glance-simplestreams-sync
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      content_id_template: "{{ content_id_template }}"
+      mirror_list: |-
+        [{url: "http://cloud-images.ubuntu.com/daily/";, name_prefix: "ubuntu:released", path: "streams/v1/index.sjson", max: 3, item_filters: ["release~({{ gss_series }})", "arch~(x86_64|amd64)", "ftype~(disk1.img|disk.img)"]}]
+      name_prefix: "{{ name_prefix }}/"
+      openstack-auth-url: "https://keystone.ps5.canonical.com:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name_lcy02 }}"
+      openstack-username: "{{ openstack_username_lcy02 }}"
+      region: scalingstack-lcy02
+      use_swift: false
+      visibility: private
+  launchpad-buildd-image-modifier-amd64:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+  launchpad-buildd-image-modifier-bos01-amd64:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      extra-keys: "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v2\n\nmI0ESUm55wEEALrxow0PCnGeCAebH9g5+wtZBfXZdx2vZts+XsTTHxDRsMNgMC9b\n0klCgbydvkmF9WCphCjQ61Wp/Bh0C7DSXVCpA/xs55QB5VCUceIMZCbMTPq1h7Ht\ncA1f+o6+OCPUntErG6eGize6kGhdjBNPOT+q4BSIL69rPuwfM9ZyAYcBABEBAAG0\nJkxhdW5jaHBhZCBQUEEgZm9yIExhdW5jaHBhZCBEZXZlbG9wZXJziLYEEwECACAF\nAklJuecCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAtH/tsClF0rxsQA/0Q\nw0Yk+xIA1xibyf+UCF9/4fXzdo/tr76qxPRyFiv0uLbFOmW6t26jzpWBHocCHcCU\n57l7rlcEzIHFMcS9Ol6MughP4lhywf9ceeqg2SD6AXjZ0iFarwkueTcHwff5j0lG\nIzzCUVTYJ+m79f/r0dfctL2DwnX7JnT/41mEuR1qbokBHAQQAQIABgUCTB7s7wAK\nCRDFXO8hUqH8T94pCACxl/Gdo82N01H82HvNBa8zQFixNQIwNJN/VxH3WfRvissW\nOMTJnTnNOQErxUhqHrasvZf3djNoHeKRNToTTBaGiEwoySmEK05i4Toq74jWAOs6\nflD2S8natWbobK5V+B2pXZl5g/4Ay21C3H1sZlUxDCcOH9Jh8/0feAZHoSQ/V1Xa\nrEPb+TGdV0hP3Yp7+nIT91sYkj566kA8fjoxJrY/EvXGn98bhYMbMNbtS1Z0WeGp\nzG2hiL6wLSLBxz4Ae9MShOMwNyC1zmr/d1wlF0Efx1N9HaRtRq2s/zqH+ebB7Sr+\nV+SquObb0qr4eAjtslN5BxWROhf+wZM6WJO0Z6nBiQEcBBABAgAGBQJTHvsiAAoJ\nEIngjfAzAr5Z8y4H/jltxz5OwHIDoiXsyWnpjO1SZUV6I6evKpSD7huYtd7MwFZC\n0CgExsPPqLNQCUxITR+9jlqofi/QsTwP7Qq55VmIrKLrZ9KCK1qBnMa/YEXi6TeK\n65lnyN6lNOdzhcsBm3s1/U9ewWp1vsw4UAclmu6tI8GUko+e32K1QjMtIjeVejQl\nJCYDjuxfHhcFWyRo0TWu24F6VD3YxBHpne/M00yd2mLLpHdQrxw/vbvVhZkRDutQ\nemKRA81ZM2WZ1iqYOXtEs5VrD/PtU0nvSAowgeWBmcOwWn3Om+pVsnSoFo46CDvo\nC6YXOWMOMFIxfVhPWqlBkWQsnXFzgk/Xyo4vlTY=\n=Wq6H\n-----END PGP PUBLIC KEY BLOCK-----"
+      extra-packages: "nvidia-headless-525-grid nvidia-utils-525-grid"
+      # extra-sources must also be set in the secrets file, to add
+      # ppa:launchpad/ubuntu/buildd-gpu; that requires an authentication
+      # token.
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+  launchpad-buildd-image-modifier-bos01-arm64:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false compat_uts_machine=armv7l"
+      remote-modifiers: '{{ modifiers_bos01 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+  launchpad-buildd-image-modifier-bos01-arm64-gpu:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      extra-packages: "nvidia-headless-525-server nvidia-utils-525-server"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false compat_uts_machine=armv7l"
+      remote-modifiers: '{{ modifiers_bos01 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+  launchpad-buildd-image-modifier-bos01-ppc64el:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+      remote-modifiers: '{{ modifiers_bos01 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+  launchpad-buildd-image-modifier-bos01-s390x:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+      remote-modifiers: '{{ modifiers_bos01 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+  launchpad-buildd-image-modifier-bos02-arm64:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false compat_uts_machine=armv7l"
+      remote-modifiers: '{{ modifiers_bos02 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+  launchpad-buildd-image-modifier-bos02-ppc64el:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+      remote-modifiers: '{{ modifiers_bos02 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+  launchpad-buildd-image-modifier-bos02-s390x:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+      remote-modifiers: '{{ modifiers_bos02 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+  launchpad-buildd-image-modifier-bos03-amd64:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+  launchpad-buildd-image-modifier-bos03-amd64-gpu:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      # We use `-grid` here instead of `-server` for compatibility with
+      # amd64 (see https://launchpad.net/~launchpad/+archive/ubuntu/buildd-gpu).
+      extra-keys: "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v2\n\nmI0ESUm55wEEALrxow0PCnGeCAebH9g5+wtZBfXZdx2vZts+XsTTHxDRsMNgMC9b\n0klCgbydvkmF9WCphCjQ61Wp/Bh0C7DSXVCpA/xs55QB5VCUceIMZCbMTPq1h7Ht\ncA1f+o6+OCPUntErG6eGize6kGhdjBNPOT+q4BSIL69rPuwfM9ZyAYcBABEBAAG0\nJkxhdW5jaHBhZCBQUEEgZm9yIExhdW5jaHBhZCBEZXZlbG9wZXJziLYEEwECACAF\nAklJuecCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAtH/tsClF0rxsQA/0Q\nw0Yk+xIA1xibyf+UCF9/4fXzdo/tr76qxPRyFiv0uLbFOmW6t26jzpWBHocCHcCU\n57l7rlcEzIHFMcS9Ol6MughP4lhywf9ceeqg2SD6AXjZ0iFarwkueTcHwff5j0lG\nIzzCUVTYJ+m79f/r0dfctL2DwnX7JnT/41mEuR1qbokBHAQQAQIABgUCTB7s7wAK\nCRDFXO8hUqH8T94pCACxl/Gdo82N01H82HvNBa8zQFixNQIwNJN/VxH3WfRvissW\nOMTJnTnNOQErxUhqHrasvZf3djNoHeKRNToTTBaGiEwoySmEK05i4Toq74jWAOs6\nflD2S8natWbobK5V+B2pXZl5g/4Ay21C3H1sZlUxDCcOH9Jh8/0feAZHoSQ/V1Xa\nrEPb+TGdV0hP3Yp7+nIT91sYkj566kA8fjoxJrY/EvXGn98bhYMbMNbtS1Z0WeGp\nzG2hiL6wLSLBxz4Ae9MShOMwNyC1zmr/d1wlF0Efx1N9HaRtRq2s/zqH+ebB7Sr+\nV+SquObb0qr4eAjtslN5BxWROhf+wZM6WJO0Z6nBiQEcBBABAgAGBQJTHvsiAAoJ\nEIngjfAzAr5Z8y4H/jltxz5OwHIDoiXsyWnpjO1SZUV6I6evKpSD7huYtd7MwFZC\n0CgExsPPqLNQCUxITR+9jlqofi/QsTwP7Qq55VmIrKLrZ9KCK1qBnMa/YEXi6TeK\n65lnyN6lNOdzhcsBm3s1/U9ewWp1vsw4UAclmu6tI8GUko+e32K1QjMtIjeVejQl\nJCYDjuxfHhcFWyRo0TWu24F6VD3YxBHpne/M00yd2mLLpHdQrxw/vbvVhZkRDutQ\nemKRA81ZM2WZ1iqYOXtEs5VrD/PtU0nvSAowgeWBmcOwWn3Om+pVsnSoFo46CDvo\nC6YXOWMOMFIxfVhPWqlBkWQsnXFzgk/Xyo4vlTY=\n=Wq6H\n-----END PGP PUBLIC KEY BLOCK-----"
+      extra-packages: "nvidia-headless-525-grid nvidia-utils-525-grid"
+      # extra-sources must also be set in the secrets file, to add
+      # ppa:launchpad/ubuntu/buildd-gpu; that requires an authentication
+      # token.
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+  launchpad-buildd-image-modifier-bos03-arm64:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false compat_uts_machine=armv7l"
+      remote-modifiers: '{{ modifiers_bos03 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+{%- if stage_name in ("production", "qastaging") %}
+  launchpad-buildd-image-modifier-bos03-riscv64:
+    charm: {{ charm_dir }}/launchpad-buildd-image-modifier
+    options:
+      clamav-database-url: "{{ clamav_database_url }}"
+      launchpad-buildd-repository: "{{ launchpad_buildd_repository }}"
+      linux-command-line-extra: "systemd.unified_cgroup_hierarchy=false"
+      # XXX cjwatson 2023-11-15: jammy defaults to 5.0/stable, which has
+      # some race conditions affecting Launchpad builds.  Change this to a
+      # more stable channel (e.g. 5.20/stable) once one exists that contains
+      # https://github.com/canonical/lxd/pull/12530.
+      lxd-channel: "latest/candidate"
+      remote-modifiers: '{{ modifiers_bos03 }}'
+      remote-modifier-private-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder
+      remote-modifier-public-key: include-base64://{{ local_dir }}/id_rsa.imagebuilder.pub
+      sbuild-stalled-package-timeout: 1500
+{%- endif %}
+  vbuilder-manage-bos01:
+    charm: {{ charm_dir }}/vbuilder-manage
+    constraints: "cores=2 mem=8G root-disk=20G {{ extra_constraints }}"
+    expose: true
+    num_units: 1
+    options:
+      amqp-username: vbuilder-manage-bos01
+      amqp-vhost: vbuilder-manage-bos01
+      celery-worker-count: "32"
+      dns-update-host: "{{ dns_update_host_bos01 }}"
+      dns-update-key-name: "{{ dns_update_key_name }}"
+      domain: "{{ domain_bos01 }}"
+      instance-key-name: "{{ instance_key_name_bos01 }}"
+      instance-network: "{{ instance_network_bos01 }}"
+      instance-router: "{{ instance_router }}"
+      log-hosts-allow: "{{ log_hosts_allow }}"
+      lp-buildd-managers: "{{ lp_buildd_managers }}"
+      lp-environment: "{{ lp_environment }}"
+      lp-sshkey: "{{ lp_sshkey }}"
+      openstack-auth-url: http://keystone.infra.bos01.scalingstack:5000/v3
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      vbuilders: '{{ vbuilders("bos01", vbuilders_bos01) }}'
+  vbuilder-manage-bos02:
+    charm: {{ charm_dir }}/vbuilder-manage
+    constraints: "cores=2 mem=8G root-disk=20G {{ extra_constraints }}"
+    expose: true
+    num_units: 1
+    options:
+      amqp-username: vbuilder-manage-bos02
+      amqp-vhost: vbuilder-manage-bos02
+      celery-worker-count: "50"
+      dns-update-host: "{{ dns_update_host_bos02 }}"
+      dns-update-key-name: "{{ dns_update_key_name }}"
+      domain: "{{ domain_bos02 }}"
+      instance-key-name: "{{ instance_key_name_bos02 }}"
+      instance-network: "{{ instance_network_bos02 }}"
+      instance-router: "{{ instance_router }}"
+      log-hosts-allow: "{{ log_hosts_allow }}"
+      lp-buildd-managers: "{{ lp_buildd_managers }}"
+      lp-environment: "{{ lp_environment }}"
+      lp-sshkey: "{{ lp_sshkey }}"
+      openstack-auth-url: http://keystone.infra.bos02.scalingstack:5000/v2.0
+      openstack-tenant-name: "{{ openstack_tenant_name }}"
+      openstack-username: "{{ openstack_username }}"
+      vbuilders: '{{ vbuilders("bos02", vbuilders_bos02) }}'
+  vbuilder-manage-bos03:
+    charm: {{ charm_dir }}/vbuilder-manage
+    constraints: "cores=4 mem=8G root-disk=50G {{ extra_constraints }}"
+    expose: true
+    num_units: 1
+    options:
+      amqp-username: vbuilder-manage-bos03
+      amqp-vhost: vbuilder-manage-bos03
+      celery-worker-count: "50"
+      dns-update-host: "{{ dns_update_host_bos03 }}"
+      dns-update-key-name: "{{ dns_update_key_name }}"
+      domain: "{{ domain_bos03 }}"
+      instance-key-name: "{{ instance_key_name_bos03 }}"
+      instance-network: "{{ instance_network_bos03 }}"
+      instance-router: "{{ instance_router_bos03 }}"
+      log-hosts-allow: "{{ log_hosts_allow }}"
+      lp-buildd-managers: "{{ lp_buildd_managers }}"
+      lp-environment: "{{ lp_environment }}"
+      lp-sshkey: "{{ lp_sshkey }}"
+      openstack-auth-url: "https://keystone.ps6.canonical.com:5000/v3";
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name_bos03 }}"
+      openstack-username: "{{ openstack_username_bos03 }}"
+      vbuilders: '{{ vbuilders("bos03", vbuilders_bos03) }}'
+  vbuilder-manage-lcy02:
+    charm: {{ charm_dir }}/vbuilder-manage
+    constraints: "cores=4 mem=8G root-disk=50G {{ extra_constraints }}"
+    expose: true
+    num_units: 1
+    options:
+      amqp-username: vbuilder-manage-lcy02
+      amqp-vhost: vbuilder-manage-lcy02
+      celery-worker-count: "50"
+      dns-update-host: "{{ dns_update_host_lcy02 }}"
+      dns-update-key-name: "{{ dns_update_key_name }}"
+      domain: "{{ domain_lcy02 }}"
+      instance-key-name: "{{ instance_key_name_lcy02 }}"
+      instance-network: "{{ instance_network_lcy02 }}"
+      instance-router: "{{ instance_router_lcy02 }}"
+      log-hosts-allow: "{{ log_hosts_allow }}"
+      lp-buildd-managers: "{{ lp_buildd_managers }}"
+      lp-environment: "{{ lp_environment }}"
+      lp-sshkey: "{{ lp_sshkey }}"
+      openstack-auth-url: https://keystone.ps5.canonical.com:5000/v3
+      openstack-identity-api-version: "3"
+      openstack-tenant-name: "{{ openstack_tenant_name_lcy02 }}"
+      openstack-username: "{{ openstack_username_lcy02 }}"
+      vbuilders: '{{ vbuilders("lcy02", vbuilders_lcy02) }}'
+  clamav-database-mirror:
+    charm: ch:clamav-database-mirror
+    series: jammy
+    constraints: "{{ extra_constraints }}"
+    num_units: 1
+    options:
+      http-proxy: "http://squid.internal:3128/";
+  ntp:
+    charm: cs:ntp
+    options:
+      source: "ntp1.canonical.com ntp2.canonical.com ntp3.canonical.com ntp4.canonical.com"
+  # We need to configure telegraf explicitly here because we use custom
+  # plugins.  The subordinates spec will set up the relations.
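+  # (The procstat inputs below report per-process metrics for the ppa-reset
+  # celery workers and script, matched against their command lines.)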
+  telegraf:
+    charm: ch:telegraf
+    channel: candidate
+    expose: true
+    options:
+      extra_plugins: |-
+        [[inputs.procstat]]
+          pattern = "celery.*--app ppareset"
+        [[inputs.procstat]]
+          pattern = "/usr/local/sbin/ppa-reset"
+      install_method: snap
+relations:
+  - ["glance-simplestreams-sync-bos01-amd64:image-modifier", "launchpad-buildd-image-modifier-bos01-amd64:image-modifier"]
+  - ["glance-simplestreams-sync-bos01-arm64:image-modifier", "launchpad-buildd-image-modifier-bos01-arm64:image-modifier"]
+  - ["glance-simplestreams-sync-bos01-arm64-gpu:image-modifier", "launchpad-buildd-image-modifier-bos01-arm64-gpu:image-modifier"]
+  - ["glance-simplestreams-sync-bos01-ppc64el:image-modifier", "launchpad-buildd-image-modifier-bos01-ppc64el:image-modifier"]
+  - ["glance-simplestreams-sync-bos01-s390x:image-modifier", "launchpad-buildd-image-modifier-bos01-s390x:image-modifier"]
+  - ["glance-simplestreams-sync-bos02-arm64:image-modifier", "launchpad-buildd-image-modifier-bos02-arm64:image-modifier"]
+  - ["glance-simplestreams-sync-bos02-ppc64el:image-modifier", "launchpad-buildd-image-modifier-bos02-ppc64el:image-modifier"]
+  - ["glance-simplestreams-sync-bos02-s390x:image-modifier", "launchpad-buildd-image-modifier-bos02-s390x:image-modifier"]
+  - ["glance-simplestreams-sync-bos03-amd64:image-modifier", "launchpad-buildd-image-modifier-bos03-amd64:image-modifier"]
+  - ["glance-simplestreams-sync-bos03-amd64-gpu:image-modifier", "launchpad-buildd-image-modifier-bos03-amd64-gpu:image-modifier"]
+  - ["glance-simplestreams-sync-bos03-arm64:image-modifier", "launchpad-buildd-image-modifier-bos03-arm64:image-modifier"]
+{%- if stage_name in ("production", "qastaging") %}
+  - ["glance-simplestreams-sync-bos03-riscv64:image-modifier", "launchpad-buildd-image-modifier-bos03-riscv64:image-modifier"]
+{%- endif %}
+  - ["glance-simplestreams-sync-lcy02-amd64:image-modifier", "launchpad-buildd-image-modifier-amd64:image-modifier"]
+  - ["vbuilder-manage-bos01:amqp", "rabbitmq-server:amqp"]
+  - ["vbuilder-manage-bos02:amqp", "rabbitmq-server:amqp"]
+  - ["vbuilder-manage-bos03:amqp", "rabbitmq-server:amqp"]
+  - ["vbuilder-manage-lcy02:amqp", "rabbitmq-server:amqp"]
+  - ["rabbitmq-server", "ntp"]
+  - ["glance-simplestreams-sync-bos01-amd64", "ntp"]
+  - ["glance-simplestreams-sync-bos01-arm64", "ntp"]
+  - ["glance-simplestreams-sync-bos01-arm64-gpu", "ntp"]
+  - ["glance-simplestreams-sync-bos01-ppc64el", "ntp"]
+  - ["glance-simplestreams-sync-bos01-s390x", "ntp"]
+  - ["glance-simplestreams-sync-bos02-arm64", "ntp"]
+  - ["glance-simplestreams-sync-bos02-ppc64el", "ntp"]
+  - ["glance-simplestreams-sync-bos02-s390x", "ntp"]
+  - ["glance-simplestreams-sync-bos03-amd64", "ntp"]
+  - ["glance-simplestreams-sync-bos03-amd64-gpu", "ntp"]
+  - ["glance-simplestreams-sync-bos03-arm64", "ntp"]
+{%- if stage_name in ("production", "qastaging") %}
+  - ["glance-simplestreams-sync-bos03-riscv64", "ntp"]
+{%- endif %}
+  - ["glance-simplestreams-sync-lcy02-amd64", "ntp"]
+  - ["vbuilder-manage-bos01", "ntp"]
+  - ["vbuilder-manage-bos02", "ntp"]
+  - ["vbuilder-manage-bos03", "ntp"]
+  - ["vbuilder-manage-lcy02", "ntp"]
+  - ["clamav-database-mirror", "ntp"]
diff --git a/vbuilder/collect b/vbuilder/collect
new file mode 100644
index 0000000..2deb9b8
--- /dev/null
+++ b/vbuilder/collect
@@ -0,0 +1,4 @@
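+# One charm per line: <application name><whitespace><source to collect from>.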
+rabbitmq-server				cs:rabbitmq-server
+glance-simplestreams-sync		git+lp:~launchpad/charm-glance-simplestreams-sync;revno=scalingstack
+launchpad-buildd-image-modifier		git+lp:charm-launchpad-buildd-image-modifier
+vbuilder-manage				git+lp:launchpad-vbuilder-manage
diff --git a/vbuilder/configs/custom-secgroups-production.yaml b/vbuilder/configs/custom-secgroups-production.yaml
new file mode 100644
index 0000000..95a7565
--- /dev/null
+++ b/vbuilder/configs/custom-secgroups-production.yaml
@@ -0,0 +1,10 @@
+applications:
+    clamav-database-mirror:
+        type: neutron
+        rules:
+            - clamav-database-mirror
+rules:
+    clamav-database-mirror:
+        # Public HTTP.  (Firewalls restrict this to builders, but there's
+        # nothing secret here.)
+        - {"protocol": "tcp", "family": "IPv4", "port": 80, "cidr": "0.0.0.0/0"}
diff --git a/vbuilder/configs/custom-secgroups-staging.yaml b/vbuilder/configs/custom-secgroups-staging.yaml
new file mode 100644
index 0000000..95a7565
--- /dev/null
+++ b/vbuilder/configs/custom-secgroups-staging.yaml
@@ -0,0 +1,10 @@
+applications:
+    clamav-database-mirror:
+        type: neutron
+        rules:
+            - clamav-database-mirror
+rules:
+    clamav-database-mirror:
+        # Public HTTP.  (Firewalls restrict this to builders, but there's
+        # nothing secret here.)
+        - {"protocol": "tcp", "family": "IPv4", "port": 80, "cidr": "0.0.0.0/0"}
diff --git a/vbuilder/expand-bundle b/vbuilder/expand-bundle
new file mode 100755
index 0000000..bbbe52e
--- /dev/null
+++ b/vbuilder/expand-bundle
@@ -0,0 +1,36 @@
+#! /usr/bin/python3
+# The vbuilder bundle is heavily parameterized in order to keep its length
+# under control and reduce repetition.  This provides an easy way to expand
+# the bundle to see the effect of changes.
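+#
+# Example:
+#
+#     ./expand-bundle qastaging > /tmp/bundle-qastaging.yaml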
+
+from argparse import ArgumentParser
+import sys
+
+from jinja2 import (
+    Environment,
+    FileSystemLoader,
+    )
+import yaml
+
+
+def main():
+    parser = ArgumentParser()
+    parser.add_argument("stage_name", help="Mojo stage name")
+    args = parser.parse_args()
+
+    template_env = Environment(loader=FileSystemLoader("."))
+    template = template_env.get_template("bundle.yaml")
+    rendered = template.render({
+        "charm_dir": "fake-charm-dir",
+        "stage_name": args.stage_name,
+        })
+    try:
+        yaml.safe_load(rendered)
+    except Exception as e:
+        print(f"Cannot parse rendered template: {e}", file=sys.stderr)
+        sys.exit(1)
+    print(rendered)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/vbuilder/manifest b/vbuilder/manifest
new file mode 100644
index 0000000..8e13254
--- /dev/null
+++ b/vbuilder/manifest
@@ -0,0 +1,5 @@
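+# Run order: collect the charms, deploy the bundle (merging local secrets),
+# upgrade any already-deployed charms, apply the custom security groups, then
+# wait for all units to settle.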
+collect
+bundle config=bundle.yaml local=secrets
+script config=upgrade-charms
+include config=manifest-secgroups
+juju-check-wait
diff --git a/vbuilder/manifest-rebuild-images b/vbuilder/manifest-rebuild-images
new file mode 100644
index 0000000..dbb5cb1
--- /dev/null
+++ b/vbuilder/manifest-rebuild-images
@@ -0,0 +1 @@
+script config=rebuild-images
diff --git a/vbuilder/manifest-secgroups b/vbuilder/manifest-secgroups
new file mode 100644
index 0000000..8c438ee
--- /dev/null
+++ b/vbuilder/manifest-secgroups
@@ -0,0 +1 @@
+script config=utils/custom-secgroups.py SKIP_STAGES=devel
diff --git a/vbuilder/rebuild-images b/vbuilder/rebuild-images
new file mode 100755
index 0000000..7b05faf
--- /dev/null
+++ b/vbuilder/rebuild-images
@@ -0,0 +1,85 @@
+#! /usr/bin/python3
+
+import os
+
+from utils import utils
+
+
+name_prefix_by_stage = {
+    "qastaging": "launchpad-buildd-qastaging",
+    "staging": "launchpad-buildd-staging",
+    "production": "launchpad-buildd",
+    }
+
+targets_by_stage = {
+    "qastaging": [
+        ("bos01", "amd64", "focal"),
+        ("bos01", "arm64", "focal"),
+        ("bos01", "arm64-gpu", "focal"),
+        ("bos01", "ppc64el", "focal"),
+        ("bos01", "s390x", "focal"),
+        ("bos02", "arm64", "focal"),
+        ("bos02", "ppc64el", "focal"),
+        ("bos02", "s390x", "focal"),
+        ("bos03", "amd64", "focal"),
+        ("bos03", "amd64-gpu", "focal"),
+        ("bos03", "arm64", "focal"),
+        ("bos03", "riscv64", "jammy"),
+        ("lcy02", "amd64", "focal"),
+        ],
+    "staging": [
+        ("bos01", "amd64", "jammy"),
+        ("bos01", "arm64", "jammy"),
+        ("bos01", "arm64-gpu", "jammy"),
+        ("bos01", "ppc64el", "jammy"),
+        ("bos01", "s390x", "jammy"),
+        ("bos02", "arm64", "jammy"),
+        ("bos02", "ppc64el", "jammy"),
+        ("bos02", "s390x", "jammy"),
+        ("bos03", "amd64", "jammy"),
+        ("lcy02", "amd64", "jammy"),
+        ],
+    "production": [
+        ("bos01", "amd64", "focal"),
+        ("bos01", "arm64", "focal"),
+        ("bos01", "ppc64el", "focal"),
+        ("bos01", "s390x", "focal"),
+        ("bos02", "arm64", "focal"),
+        ("bos02", "ppc64el", "focal"),
+        ("bos02", "s390x", "focal"),
+        ("bos03", "amd64", "focal"),
+        ("bos03", "arm64", "focal"),
+        ("bos03", "riscv64", "jammy"),
+        ("lcy02", "amd64", "focal"),
+        ],
+    }
+
+
+def get_leader_unit(juju_services, application):
+    """Get the current leader unit for an application.
+
+    "juju run-action application/leader" sometimes reports "ERROR could not
+    determine leader for ..." even though a leader exists.  Work around this.
+    """
+    return next(
+        name for name, status in juju_services[application]["units"].items()
+        if status.get("leader", False))
+
+
+def main():
+    stage = os.path.basename(os.environ["MOJO_STAGE"])
+    name_prefix = name_prefix_by_stage[stage]
+    targets = targets_by_stage[stage]
+    juju_services = utils.juju_services()
+    for region, arch, series in targets:
+        application = f"glance-simplestreams-sync-{region}-{arch}"
+        unit = get_leader_unit(juju_services, application)
+        rebuild_cmd = [
+            "juju", "ssh", unit, "sudo", "/usr/local/bin/rebuild-latest-image",
+            f"{name_prefix}/ubuntu-{series}-daily-{arch}-",
+            ]
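+        # e.g. for ("bos03", "amd64-gpu", "focal") on qastaging:
+        #   juju ssh <leader unit> sudo /usr/local/bin/rebuild-latest-image \
+        #       launchpad-buildd-qastaging/ubuntu-focal-daily-amd64-gpu-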
+        utils.run(None, rebuild_cmd)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/vbuilder/upgrade-charms b/vbuilder/upgrade-charms
new file mode 100755
index 0000000..ebe3c6a
--- /dev/null
+++ b/vbuilder/upgrade-charms
@@ -0,0 +1,94 @@
+#! /usr/bin/python3
+
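+# Imported purely for its side effect (hence the noqa): the version check
+# should run before anything else does.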
+import utils.check_version  # noqa: F401
+
+import os.path
+import subprocess
+
+
+targets_by_stage = {
+    "qastaging": [
+        ("bos01", "amd64"),
+        ("bos01", "arm64"),
+        ("bos01", "arm64-gpu"),
+        ("bos01", "ppc64el"),
+        ("bos01", "s390x"),
+        ("bos02", "arm64"),
+        ("bos02", "ppc64el"),
+        ("bos02", "s390x"),
+        ("bos03", "amd64"),
+        ("bos03", "amd64-gpu"),
+        ("bos03", "arm64"),
+        ("bos03", "riscv64"),
+        ("lcy02", "amd64"),
+    ],
+    "staging": [
+        ("bos01", "amd64"),
+        ("bos01", "arm64"),
+        ("bos01", "arm64-gpu"),
+        ("bos01", "ppc64el"),
+        ("bos01", "s390x"),
+        ("bos02", "arm64"),
+        ("bos02", "ppc64el"),
+        ("bos02", "s390x"),
+        ("bos03", "amd64"),
+        ("lcy02", "amd64"),
+    ],
+    "production": [
+        ("bos01", "amd64"),
+        ("bos01", "arm64"),
+        ("bos01", "ppc64el"),
+        ("bos01", "s390x"),
+        ("bos02", "arm64"),
+        ("bos02", "ppc64el"),
+        ("bos02", "s390x"),
+        ("bos03", "amd64"),
+        ("bos03", "arm64"),
+        ("bos03", "riscv64"),
+        ("lcy02", "amd64"),
+    ],
+}
+
+
+upgraded = set()
+
+
+def upgrade_charm(app_name, charm_name):
+    if app_name in upgraded:
+        return
+    subprocess.check_call(
+        [
+            "juju",
+            "upgrade-charm",
+            app_name,
+            "--path=%s"
+            % os.path.join(
+                os.environ["MOJO_REPO_DIR"],
+                os.environ["MOJO_SERIES"],
+                charm_name,
+            ),
+        ]
+    )
+    upgraded.add(app_name)
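+
+
+# e.g. upgrade_charm("vbuilder-manage-bos03", "vbuilder-manage") runs, at most
+# once per application:
+#   juju upgrade-charm vbuilder-manage-bos03 \
+#       --path=$MOJO_REPO_DIR/$MOJO_SERIES/vbuilder-manage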
+
+
+def main():
+    stage = os.path.basename(os.environ["MOJO_STAGE"])
+    targets = targets_by_stage[stage]
+    for region, arch in targets:
+        upgrade_charm(
+            f"glance-simplestreams-sync-{region}-{arch}",
+            "glance-simplestreams-sync",
+        )
+        # lcy02-amd64 uses an anomalous name for this charm; we won't follow
+        # this pattern for future regions on amd64.
+        if (region, arch) == ("lcy02", "amd64"):
+            lbim_app_name = "launchpad-buildd-image-modifier-amd64"
+        else:
+            lbim_app_name = f"launchpad-buildd-image-modifier-{region}-{arch}"
+        upgrade_charm(lbim_app_name, "launchpad-buildd-image-modifier")
+        upgrade_charm(f"vbuilder-manage-{region}", "vbuilder-manage")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/vbuilder/utils b/vbuilder/utils
new file mode 120000
index 0000000..468ba70
--- /dev/null
+++ b/vbuilder/utils
@@ -0,0 +1 @@
+../utils
\ No newline at end of file