← Back to team overview

launchpad-reviewers team mailing list archive

[Merge] ~tushar5526/launchpad:bump-pre-commit into launchpad:master

 

Tushar Gupta has proposed merging ~tushar5526/launchpad:bump-pre-commit into launchpad:master.

Requested reviews:
  Launchpad code reviewers (launchpad-reviewers)

For more details, see:
https://code.launchpad.net/~tushar5526/launchpad/+git/launchpad/+merge/469650

Ran pre-commit autoupdate, followed by a pre-commit run on all files. 

flake8 was reporting formatting errors for valid URL strings and needed a version bump for Python 3.12.
-- 
The attached diff has been truncated due to its size.
Your team Launchpad code reviewers is requested to review the proposed merge of ~tushar5526/launchpad:bump-pre-commit into launchpad:master.
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index a59e761..efa5e33 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -1,3 +1,4 @@
+<<<<<<< .git-blame-ignore-revs
 # apply pyupgrade
 67e3b53a4375288983a72a7beb9a5a67ba739527
 # apply pyupgrade --py3-plus to lp.answers
@@ -110,3 +111,7 @@ a0cc45d527f251438cff74b4134e7a66fba42ac7
 ee5977f514d584c64afe453ac9d2eaa0fdbc3afd
 # apply black's 2024 stable style
 b5b64683e1bb26ffef31550f8405553275690deb
+=======
+# apply black and isort
+b7e61dd8bbcff898b9a500da005b4f5b0853c4ac
+>>>>>>> .git-blame-ignore-revs
diff --git a/.gitignore b/.gitignore
index 7055634..137735b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+<<<<<<< .gitignore
 # compiled python files
 *.pyc
 
@@ -103,3 +104,22 @@ lp.sfood
 apidocs
 configs/testrunner_*
 configs/testrunner-appserver_*
+=======
+*.egg-info
+*.pyc
+.pybuild
+buildd-example.conf
+charm/*.charm
+dist
+debian/debhelper-build-stamp
+debian/files
+debian/launchpad-buildd
+debian/python3-lpbuildd
+debian/tmp
+debian/*.debhelper.log
+debian/*.debhelper
+debian/*.substvars
+.tox/
+docs/_build/
+venv/
+>>>>>>> .gitignore
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4570489..18d3ad2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,3 +1,4 @@
+<<<<<<< .pre-commit-config.yaml
 # See https://pre-commit.com for more information
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
@@ -46,11 +47,37 @@ repos:
           )$
 -   repo: https://github.com/PyCQA/isort
     rev: 5.12.0
+=======
+repos:
+-   repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+    -   id: check-added-large-files
+    -   id: check-merge-conflict
+    -   id: check-xml
+    -   id: check-yaml
+    -   id: debug-statements
+-   repo: https://github.com/asottile/pyupgrade
+    rev: v3.16.0
+    hooks:
+    -   id: pyupgrade
+        args: [--py36-plus]
+        # lpbuildd-git-proxy is copied into the build chroot and run there,
+        # so it has different compatibility constraints.
+        exclude: ^bin/lpbuildd-git-proxy$
+-   repo: https://github.com/psf/black
+    rev: 24.4.2
+    hooks:
+    -   id: black
+-   repo: https://github.com/PyCQA/isort
+    rev: 5.13.2
+>>>>>>> .pre-commit-config.yaml
     hooks:
     -   id: isort
         name: isort
         args: [--profile, black]
 -   repo: https://github.com/PyCQA/flake8
+<<<<<<< .pre-commit-config.yaml
     rev: 5.0.4
     hooks:
     -   id: flake8
@@ -102,3 +129,12 @@ repos:
     hooks:
     -   id: woke-from-source
         files: ^doc/.*\.rst$
+=======
+    rev: 7.1.0
+    hooks:
+    -   id: flake8
+-   repo: https://github.com/shellcheck-py/shellcheck-py
+    rev: v0.10.0.1
+    hooks:
+    -   id: shellcheck
+>>>>>>> .pre-commit-config.yaml
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 91b6a9c..82a91a8 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -1,5 +1,16 @@
 version: 2
 
+<<<<<<< .readthedocs.yaml
 python:
    install:
    - requirements: requirements/docs.txt
+=======
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3"
+
+sphinx:
+  configuration: docs/conf.py
+  fail_on_warning: true
+>>>>>>> .readthedocs.yaml
diff --git a/LICENSE b/LICENSE
index 30edbc5..24ac54b 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,8 @@
+<<<<<<< LICENSE
 Launchpad is Copyright 2004 Canonical Ltd.
+=======
+Launchpad is Copyright 2004-2011 Canonical Ltd.
+>>>>>>> LICENSE
 
 Canonical Ltd ("Canonical") distributes the Launchpad source code
 under the GNU Affero General Public License, version 3 ("AGPLv3").
@@ -13,8 +17,11 @@ non-commercially).
 The Launchpad name and logo are trademarks of Canonical, and may not
 be used without the prior written permission of Canonical.
 
+<<<<<<< LICENSE
 Git SCM logos are licensed Creative Commons Attribution 3.0 Unported.
 
+=======
+>>>>>>> LICENSE
 Third-party copyright in this distribution is noted where applicable.
 
 All rights not expressly granted are reserved.
@@ -685,6 +692,7 @@ For more information on this, and how to apply and follow the GNU AGPL, see
 <http://www.gnu.org/licenses/>.
 
 =========================================================================
+<<<<<<< LICENSE
 
 
 Creative Commons Attribution 3.0 Unported License
@@ -959,3 +967,5 @@ License; this License is not intended to restrict the license of any
 rights under applicable law.
 
 =========================================================================
+=======
+>>>>>>> LICENSE
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..666f026
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,15 @@
+include LICENSE
+include Makefile
+include bin/builder-prep
+include bin/buildrecipe
+include bin/in-target
+include bin/lpbuildd-git-proxy
+include bin/sbuild-package
+include bin/test_buildd_generatetranslationtemplates
+include bin/test_buildd_recipe
+include buildd-genconfig
+include debian/changelog
+include sbuildrc
+include template-buildd.conf
+include lpbuildd/buildd.tac
+recursive-include lpbuildd/tests *.diff *.tar.gz buildlog buildlog.long
diff --git a/Makefile b/Makefile
index a310604..5554e6c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,4 @@
+<<<<<<< Makefile
 # This file modified from Zope3/Makefile
 # Licensed under the ZPL, (c) Zope Corporation and contributors.
 
@@ -614,3 +615,30 @@ pydoctor:
 		--add-package=lib/canonical --project-name=Launchpad \
 		--docformat restructuredtext --verbose-about epytext-summary \
 		$(PYDOCTOR_OPTIONS)
+=======
+# Copyright 2009-2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+all: deb
+
+src: clean
+	dpkg-buildpackage -rfakeroot -uc -us -S
+
+deb: clean
+	dpkg-buildpackage -rfakeroot -uc -us
+
+clean:
+	fakeroot debian/rules clean
+
+realclean:
+	rm -f ../launchpad-buildd*tar.gz
+	rm -f ../launchpad-buildd*dsc
+	rm -f ../launchpad-buildd*deb
+	rm -f ../launchpad-buildd*changes
+
+.PHONY: all clean deb
+
+check:
+	PYTHONPATH=$(CURDIR):$(PYTHONPATH) python3 -m testtools.run \
+		discover -v
+>>>>>>> Makefile
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..8cadaf3
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,15 @@
+****************
+launchpad-buildd
+****************
+
+This project is the build daemon used by Launchpad's package-building
+infrastructure.
+
+It is licensed under the GNU Affero General Public Licence, version 3 (see the
+file LICENSE) except for some files originating elsewhere, which are marked
+with the licence that applies.
+
+See https://dev.launchpad.net/ for more context.
+
+The documentation is available at
+https://launchpad-buildd.readthedocs.io/en/latest/
diff --git a/bin/builder-prep b/bin/builder-prep
new file mode 100755
index 0000000..3b4205c
--- /dev/null
+++ b/bin/builder-prep
@@ -0,0 +1,27 @@
+#!/bin/sh
+#
+# Copyright 2009 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+# Buildd initial preparation script
+
+export PATH="/usr/bin:/bin:/usr/sbin:/sbin:$PATH"
+
+PACKAGES="launchpad-buildd python3-lpbuildd sbuild bzr-builder bzr git-build-recipe git dpkg-dev python3-debian"
+KERNEL=$(uname -snrvm)
+
+echo "Kernel version: $KERNEL"
+printf "Buildd toolchain package versions:"
+for package in $PACKAGES; do
+  version=$(dpkg-query -W "$package" 2>/dev/null | awk '{print $2}')
+  [ -z "$version" ] || printf " %s_%s" "$package" "$version"
+done
+echo "."
+
+if [ -f /etc/launchpad-buildd/default ]; then
+  eval "$(grep ntphost /etc/launchpad-buildd/default | sed 's/ //g')"
+fi
+if [ -n "$ntphost" ]; then
+  echo "Syncing the system clock with the buildd NTP service..."
+  sudo ntpdate -u "$ntphost"
+fi
diff --git a/bin/buildrecipe b/bin/buildrecipe
new file mode 100755
index 0000000..8cbda6d
--- /dev/null
+++ b/bin/buildrecipe
@@ -0,0 +1,467 @@
+#!/usr/bin/python3 -u
+# Copyright 2010-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""A script that builds a package from a recipe and a chroot."""
+
+import os
+import pwd
+import socket
+import stat
+import subprocess
+import sys
+import tempfile
+from optparse import OptionParser
+from textwrap import dedent
+
+from debian.deb822 import Deb822
+
+RETCODE_SUCCESS = 0
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD_TREE = 201
+RETCODE_FAILURE_INSTALL_BUILD_DEPS = 202
+RETCODE_FAILURE_BUILD_SOURCE_PACKAGE = 203
+
+
+def call_report(args, env):
+    """Run a subprocess.
+
+    Report that it was run and complain if it fails.
+
+    :return: The process exit status.
+    """
+    print("RUN %r" % args)
+    return subprocess.call(args, env=env)
+
+
+class RecipeBuilder:
+    """Builds a package from a recipe."""
+
+    def __init__(
+        self,
+        build_id,
+        author_name,
+        author_email,
+        suite,
+        distroseries_name,
+        component,
+        archive_purpose,
+        git=False,
+    ):
+        """Constructor.
+
+        :param build_id: The id of the build (a str).
+        :param author_name: The name of the author (a str).
+        :param author_email: The email address of the author (a str).
+        :param suite: The suite the package should be built for (a str).
+        :param git: If True, build a git-based recipe; if False, build a
+            bzr-based recipe.
+        """
+        self.build_id = build_id
+        if isinstance(author_name, bytes):
+            author_name = author_name.decode("utf-8")
+        self.author_name = author_name
+        self.author_email = author_email
+        self.archive_purpose = archive_purpose
+        self.component = component
+        self.distroseries_name = distroseries_name
+        self.suite = suite
+        self.git = git
+        self.chroot_path = get_build_path(build_id, "chroot-autobuild")
+        self.work_dir_relative = os.environ["HOME"] + "/work"
+        self.work_dir = os.path.join(
+            self.chroot_path, self.work_dir_relative[1:]
+        )
+
+        self.tree_path = os.path.join(self.work_dir, "tree")
+        self.apt_dir_relative = os.path.join(self.work_dir_relative, "apt")
+        self.apt_dir = os.path.join(self.work_dir, "apt")
+        self.username = pwd.getpwuid(os.getuid())[0]
+        self.apt_sources_list_dir = os.path.join(
+            self.chroot_path, "etc/apt/sources.list.d"
+        )
+
+    def install(self):
+        """Install all the requirements for building recipes.
+
+        :return: A retcode from apt.
+        """
+        return self.chroot(["apt-get", "install", "-y", "lsb-release"])
+
+    # XXX cjwatson 2021-11-23: Use shutil.which instead once we can assume
+    # Python >= 3.3.
+    def _is_command_on_path(self, command):
+        """Is 'command' on the executable search path?"""
+        if "PATH" not in os.environ:
+            return False
+        path = os.environ["PATH"]
+        for element in path.split(os.pathsep):
+            if not element:
+                continue
+            filename = os.path.join(element, command)
+            if os.path.isfile(filename) and os.access(filename, os.X_OK):
+                return True
+        return False
+
+    def buildTree(self):
+        """Build the recipe into a source tree.
+
+        As a side-effect, sets self.source_dir_relative.
+        :return: a retcode from `bzr dailydeb` or `git-build-recipe`.
+        """
+        assert not os.path.exists(self.tree_path)
+        recipe_path = os.path.join(self.work_dir, "recipe")
+        manifest_path = os.path.join(self.tree_path, "manifest")
+        with open(recipe_path) as recipe_file:
+            recipe = recipe_file.read()
+        # As of bzr 2.2, a defined identity is needed.  In this case, we're
+        # using buildd@<hostname>.
+        hostname = socket.gethostname()
+        email = "buildd@%s" % hostname
+        lsb_release = subprocess.Popen(
+            [
+                "sudo",
+                "/usr/sbin/chroot",
+                self.chroot_path,
+                "lsb_release",
+                "-r",
+                "-s",
+            ],
+            stdout=subprocess.PIPE,
+            universal_newlines=True,
+        )
+        distroseries_version = lsb_release.communicate()[0].rstrip()
+        assert lsb_release.returncode == 0
+
+        if self.git:
+            print("Git version:")
+            subprocess.check_call(["git", "--version"])
+            print(
+                subprocess.check_output(
+                    ["dpkg-query", "-W", "git-build-recipe"],
+                    universal_newlines=True,
+                )
+                .rstrip("\n")
+                .replace("\t", " ")
+            )
+        else:
+            print("Bazaar versions:")
+            subprocess.check_call(["bzr", "version"])
+            subprocess.check_call(["bzr", "plugins"])
+
+        print("Building recipe:")
+        print(recipe)
+        sys.stdout.flush()
+        env = {
+            "DEBEMAIL": self.author_email,
+            "DEBFULLNAME": self.author_name.encode("utf-8"),
+            "EMAIL": email,
+            "LANG": "C.UTF-8",
+        }
+        if self.git:
+            cmd = ["git-build-recipe"]
+        elif self._is_command_on_path("brz-build-daily-recipe"):
+            cmd = ["brz-build-daily-recipe"]
+        else:
+            cmd = ["bzr", "-Derror", "dailydeb"]
+        cmd.extend(
+            [
+                "--safe",
+                "--no-build",
+                "--manifest",
+                manifest_path,
+                "--distribution",
+                self.distroseries_name,
+                "--allow-fallback-to-native",
+                "--append-version",
+                "~ubuntu%s.1" % distroseries_version,
+                recipe_path,
+                self.tree_path,
+            ]
+        )
+        retcode = call_report(cmd, env=env)
+        if retcode != 0:
+            return retcode
+        (source,) = (
+            name
+            for name in os.listdir(self.tree_path)
+            if os.path.isdir(os.path.join(self.tree_path, name))
+        )
+        self.source_dir_relative = os.path.join(
+            self.work_dir_relative, "tree", source
+        )
+        return retcode
+
+    def getPackageName(self):
+        source_dir = os.path.join(
+            self.chroot_path, self.source_dir_relative.lstrip("/")
+        )
+        changelog = os.path.join(source_dir, "debian/changelog")
+        return open(changelog, errors="replace").readline().split(" ")[0]
+
+    def getSourceControl(self):
+        """Return the parsed source control stanza from the source tree."""
+        source_dir = os.path.join(
+            self.chroot_path, self.source_dir_relative.lstrip("/")
+        )
+        # Open as bytes to allow debian.deb822 to apply its own encoding
+        # handling.  We'll get text back from it.
+        with open(
+            os.path.join(source_dir, "debian/control"), "rb"
+        ) as control_file:
+            # Don't let Deb822.iter_paragraphs use apt_pkg.TagFile
+            # internally, since that only handles real tag files and not the
+            # slightly more permissive syntax of debian/control which also
+            # allows comments.
+            return next(
+                Deb822.iter_paragraphs(control_file, use_apt_pkg=False)
+            )
+
+    def makeDummyDsc(self, package):
+        control = self.getSourceControl()
+        with open(
+            os.path.join(self.apt_dir, "%s.dsc" % package), "w"
+        ) as dummy_dsc:
+            print(
+                dedent(
+                    """\
+                    Format: 1.0
+                    Source: %(package)s
+                    Architecture: any
+                    Version: 99:0
+                    Maintainer: invalid@xxxxxxxxxxx"""
+                )
+                % {"package": package},
+                file=dummy_dsc,
+            )
+            for field in (
+                "Build-Depends",
+                "Build-Depends-Indep",
+                "Build-Conflicts",
+                "Build-Conflicts-Indep",
+            ):
+                if field in control:
+                    print(f"{field}: {control[field]}", file=dummy_dsc)
+            print(file=dummy_dsc)
+
+    def runAptFtparchive(self):
+        conf_path = os.path.join(self.apt_dir, "ftparchive.conf")
+        with open(conf_path, "w") as conf:
+            print(
+                dedent(
+                    """\
+                    Dir::ArchiveDir "%(apt_dir)s";
+                    Default::Sources::Compress ". bzip2";
+                    BinDirectory "%(apt_dir)s" { Sources "Sources"; };
+                    APT::FTPArchive::Release {
+                        Origin "buildrecipe-archive";
+                        Label "buildrecipe-archive";
+                        Suite "invalid";
+                        Codename "invalid";
+                        Description "buildrecipe temporary archive";
+                    };"""
+                )
+                % {"apt_dir": self.apt_dir},
+                file=conf,
+            )
+        ftparchive_env = dict(os.environ)
+        ftparchive_env.pop("APT_CONFIG", None)
+        ret = subprocess.call(
+            ["apt-ftparchive", "-q=2", "generate", conf_path],
+            env=ftparchive_env,
+        )
+        if ret != 0:
+            return ret
+
+        with open(os.path.join(self.apt_dir, "Release"), "w") as release:
+            return subprocess.call(
+                [
+                    "apt-ftparchive",
+                    "-q=2",
+                    "-c",
+                    conf_path,
+                    "release",
+                    self.apt_dir,
+                ],
+                stdout=release,
+                env=ftparchive_env,
+            )
+
+    def enableAptArchive(self):
+        """Enable the dummy apt archive.
+
+        We run "apt-get update" with a temporary sources.list and some
+        careful use of APT::Get::List-Cleanup=false, so that we don't have
+        to update all sources (and potentially need to mimic the care taken
+        by update-debian-chroot, etc.).
+        """
+        tmp_list_path = os.path.join(self.apt_dir, "buildrecipe-archive.list")
+        tmp_list_path_relative = os.path.join(
+            self.apt_dir_relative, "buildrecipe-archive.list"
+        )
+        with open(tmp_list_path, "w") as tmp_list:
+            print(
+                "deb-src [trusted=yes] file://%s ./" % self.apt_dir_relative,
+                file=tmp_list,
+            )
+        ret = self.chroot(
+            [
+                "apt-get",
+                "-o",
+                "Dir::Etc::sourcelist=%s" % tmp_list_path_relative,
+                "-o",
+                "APT::Get::List-Cleanup=false",
+                "update",
+            ]
+        )
+        if ret == 0:
+            list_path = os.path.join(
+                self.apt_sources_list_dir, "buildrecipe-archive.list"
+            )
+            return subprocess.call(["sudo", "mv", tmp_list_path, list_path])
+        return ret
+
+    def setUpAptArchive(self, package):
+        """Generate a dummy apt archive with appropriate build-dependencies.
+
+        Based on Sbuild::ResolverBase.
+        """
+        os.makedirs(self.apt_dir)
+        self.makeDummyDsc(package)
+        ret = self.runAptFtparchive()
+        if ret != 0:
+            return ret
+        return self.enableAptArchive()
+
+    def installBuildDeps(self):
+        """Install the build-depends of the source tree."""
+        package = self.getPackageName()
+        currently_building_contents = (
+            "Package: %s\n"
+            "Suite: %s\n"
+            "Component: %s\n"
+            "Purpose: %s\n"
+            "Build-Debug-Symbols: no\n"
+            % (package, self.suite, self.component, self.archive_purpose)
+        )
+        with tempfile.NamedTemporaryFile(mode="w+") as currently_building:
+            currently_building.write(currently_building_contents)
+            currently_building.flush()
+            os.fchmod(currently_building.fileno(), 0o644)
+            self.copy_in(currently_building.name, "/CurrentlyBuilding")
+        self.setUpAptArchive(package)
+        return self.chroot(
+            ["apt-get", "build-dep", "-y", "--only-source", package]
+        )
+
+    def chroot(self, args, echo=False):
+        """Run a command in the chroot.
+
+        :param args: the command and arguments to run.
+        :return: the status code.
+        """
+        if echo:
+            print(
+                "Running in chroot: %s"
+                % " ".join("'%s'" % arg for arg in args)
+            )
+            sys.stdout.flush()
+        return subprocess.call(
+            ["sudo", "/usr/sbin/chroot", self.chroot_path] + args
+        )
+
+    def copy_in(self, source_path, target_path):
+        """Copy a file into the target environment.
+
+        The target file will be owned by root/root and have the same
+        permission mode as the source file.
+
+        :param source_path: the path to the file that should be copied from
+            the host system.
+        :param target_path: the path where the file should be installed
+            inside the target environment, relative to the target
+            environment's root.
+        """
+        # Use install(1) so that we can end up with root/root ownership with
+        # a minimum of subprocess calls; the buildd user may not make sense
+        # in the target.
+        mode = stat.S_IMODE(os.stat(source_path).st_mode)
+        full_target_path = os.path.join(
+            self.chroot_path, target_path.lstrip("/")
+        )
+        subprocess.check_call(
+            [
+                "sudo",
+                "install",
+                "-o",
+                "root",
+                "-g",
+                "root",
+                "-m",
+                "%o" % mode,
+                source_path,
+                full_target_path,
+            ]
+        )
+
+    def buildSourcePackage(self):
+        """Build the source package.
+
+        :return: a retcode from dpkg-buildpackage.
+        """
+        retcode = self.chroot(
+            [
+                "su",
+                "-c",
+                "cd %s && "
+                "/usr/bin/dpkg-buildpackage -i -I.bzr -I.git -us -uc -S -sa"
+                % self.source_dir_relative,
+                self.username,
+            ]
+        )
+        for filename in os.listdir(self.tree_path):
+            path = os.path.join(self.tree_path, filename)
+            if os.path.isfile(path):
+                os.rename(path, get_build_path(self.build_id, filename))
+        return retcode
+
+
+def get_build_path(build_id, *extra):
+    """Generate a path within the build directory.
+
+    :param build_id: the build id to use.
+    :param extra: the extra path segments within the build directory.
+    :return: the generated path.
+    """
+    return os.path.join(os.environ["HOME"], "build-" + build_id, *extra)
+
+
+def main():
+    parser = OptionParser(
+        usage=(
+            "usage: %prog BUILD-ID AUTHOR-NAME AUTHOR-EMAIL SUITE "
+            "DISTROSERIES-NAME COMPONENT ARCHIVE-PURPOSE"
+        )
+    )
+    parser.add_option(
+        "--git",
+        default=False,
+        action="store_true",
+        help="build a git recipe (default: bzr)",
+    )
+    options, args = parser.parse_args()
+
+    builder = RecipeBuilder(*args, git=options.git)
+    if builder.install() != 0:
+        return RETCODE_FAILURE_INSTALL
+    if builder.buildTree() != 0:
+        return RETCODE_FAILURE_BUILD_TREE
+    if builder.installBuildDeps() != 0:
+        return RETCODE_FAILURE_INSTALL_BUILD_DEPS
+    if builder.buildSourcePackage() != 0:
+        return RETCODE_FAILURE_BUILD_SOURCE_PACKAGE
+    return RETCODE_SUCCESS
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/bin/check-implicit-pointer-functions b/bin/check-implicit-pointer-functions
new file mode 100755
index 0000000..36dbec3
--- /dev/null
+++ b/bin/check-implicit-pointer-functions
@@ -0,0 +1,42 @@
+#! /usr/bin/python3 -u
+#
+# Copyright 2020 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Scan for compiler warnings that are likely to cause 64-bit problems."""
+
+import sys
+from argparse import ArgumentParser
+
+from lpbuildd.check_implicit_pointer_functions import filter_log
+
+
+def main():
+    parser = ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "--inline",
+        default=False,
+        action="store_true",
+        help="Pass through input, inserting errors in-line",
+    )
+    parser.add_argument(
+        "--warnonly",
+        default=False,
+        action="store_true",
+        help="Exit zero even if problems are found",
+    )
+    args = parser.parse_args()
+    stdin = sys.stdin
+    stdout = sys.stdout
+    if sys.version_info[0] >= 3:
+        stdin = stdin.buffer
+        stdout = stdout.buffer
+    problems = filter_log(stdin, stdout, in_line=args.inline)
+    if problems and not args.warnonly:
+        return 2
+    else:
+        return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/bin/in-target b/bin/in-target
new file mode 100755
index 0000000..0cdf8f2
--- /dev/null
+++ b/bin/in-target
@@ -0,0 +1,20 @@
+#! /usr/bin/python3 -u
+#
+# Copyright 2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Run an operation in the target."""
+
+import sys
+
+from lpbuildd.target.cli import configure_logging, parse_args
+
+
+def main():
+    configure_logging()
+    args = parse_args()
+    return args.operation.run()
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/bin/lpbuildd-git-proxy b/bin/lpbuildd-git-proxy
new file mode 100755
index 0000000..578a78c
--- /dev/null
+++ b/bin/lpbuildd-git-proxy
@@ -0,0 +1,33 @@
+#! /usr/bin/python3
+# Copyright 2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Proxy the git protocol via http_proxy.
+
+Note that this is copied into the build chroot and run there.
+"""
+
+import os
+import sys
+from urllib.parse import urlparse
+
+
+def main():
+    proxy_url = urlparse(os.environ["http_proxy"])
+    proxy_arg = "PROXY:%s:%s:%s" % (
+        proxy_url.hostname,
+        sys.argv[1],
+        sys.argv[2],
+    )
+    if proxy_url.port:
+        proxy_arg += ",proxyport=%s" % proxy_url.port
+    if proxy_url.username:
+        proxy_arg += ",proxyauth=%s:%s" % (
+            proxy_url.username,
+            proxy_url.password,
+        )
+    os.execvp("socat", ["socat", "STDIO", proxy_arg])
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/sbuild-package b/bin/sbuild-package
new file mode 100755
index 0000000..4287850
--- /dev/null
+++ b/bin/sbuild-package
@@ -0,0 +1,84 @@
+#!/bin/bash
+#
+# Copyright 2009 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+#
+# Authors: Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>
+#      and Adam Conrad <adam.conrad@xxxxxxxxxxxxx>
+
+# Buildd tool to update a debian chroot
+
+# Expects build id as arg 1, makes build-id to contain the build
+# Expects rest of arguments to be to pass to sbuild
+
+# Needs SBUILD to be set to a sbuild instance with passwordless sudo ability
+
+# We want a non-zero exit code from sbuild even if the implicit function
+# pointer check succeeds.
+set -o pipefail
+
+exec 2>&1
+
+# On multi-guest PPA hosts, the per-guest overlay sometimes gets out of
+# sync, and we notice this by way of a corrupted .sbuildrc.  We aren't going
+# to be able to build anything in this situation, so immediately return
+# BUILDERFAIL.
+if ! perl -c "$HOME/.sbuildrc" >/dev/null 2>&1; then
+    echo "$HOME/.sbuildrc is corrupt; builder needs repair work" 2>&1
+    exit 4
+fi
+
+BUILDID=$1
+ARCHITECTURETAG=$2
+SUITE=$3
+
+shift 3
+
+ACTUAL_NR_PROCESSORS=$(grep -c ^processor /proc/cpuinfo | sed 's/^0$/1/')
+NR_PROCESSORS=$ACTUAL_NR_PROCESSORS
+
+echo "Initiating build $BUILDID with $NR_PROCESSORS jobs across $ACTUAL_NR_PROCESSORS processor cores."
+
+if [ "$NR_PROCESSORS" -gt 1 ]; then
+  export DEB_BUILD_OPTIONS="${DEB_BUILD_OPTIONS:+$DEB_BUILD_OPTIONS }parallel=$NR_PROCESSORS"
+fi
+
+cd "$HOME/build-$BUILDID" || exit 2
+
+# sbuild tries to do this itself, but can end up trying to mkdir in
+# /build too early.
+getent group sbuild | sudo tee -a chroot-autobuild/etc/group > /dev/null || exit 2
+getent passwd sbuild | sudo tee -a chroot-autobuild/etc/passwd > /dev/null || exit 2
+sudo chown sbuild:sbuild chroot-autobuild/build || exit 2
+
+UNAME26=""
+case $SUITE in
+  hardy*|lucid*|maverick*|natty*|oneiric*|precise*)
+    if setarch --help | grep -q uname-2.6; then
+      UNAME26="--uname-2.6"
+    fi
+    ;;
+esac
+
+WARN=""
+case $ARCHITECTURETAG in
+  armel|armhf|hppa|i386|lpia|mips|mipsel|powerpc|s390|sparc)
+    LINUX32="linux32"
+    WARN="--warnonly"
+    ;;
+  alpha|amd64|arm64|hppa64|ia64|ppc64|ppc64el|s390x|sparc64|x32)
+    LINUX32="linux64"
+    ;;
+esac
+
+echo "Kernel reported to sbuild: $($LINUX32 $UNAME26 uname -rvm)"
+SBUILD="$LINUX32 $UNAME26 sbuild"
+
+case $SUITE in
+    warty*|hoary*|breezy*|dapper*|edgy*|feisty*|gutsy*|hardy*|karmic*)
+        WARN="--warnonly"
+        ;;
+esac
+
+$SBUILD "$@" | /usr/share/launchpad-buildd/bin/check-implicit-pointer-functions --inline $WARN
+exit $?
diff --git a/bin/test_buildd_generatetranslationtemplates b/bin/test_buildd_generatetranslationtemplates
new file mode 100755
index 0000000..be4e1ef
--- /dev/null
+++ b/bin/test_buildd_generatetranslationtemplates
@@ -0,0 +1,28 @@
+#!/usr/bin/python3
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+#
+# Test script for manual use only.  Exercises the
+# TranslationTemplatesBuildManager through XMLRPC.
+
+import sys
+from xmlrpc.client import ServerProxy
+
+if len(sys.argv) != 2:
+    print("Usage: %s <chroot_sha1>" % sys.argv[0])
+    print("Where <chroot_sha1> is the SHA1 of the chroot tarball to use.")
+    print("The chroot tarball must be in the local Librarian.")
+    print("See https://dev.launchpad.net/Soyuz/HowToUseSoyuzLocally")
+    sys.exit(1)
+
+chroot_sha1 = sys.argv[1]
+
+proxy = ServerProxy("http://localhost:8221/rpc")
+print(proxy.info())
+print(proxy.proxy_info())
+print(proxy.status())
+buildid = "1-2"
+build_type = "translation-templates"
+filemap = {}
+args = {"branch_url": "no-branch-here-sorry"}
+print(proxy.build(buildid, build_type, chroot_sha1, filemap, args))
diff --git a/bin/test_buildd_recipe b/bin/test_buildd_recipe
new file mode 100755
index 0000000..4622396
--- /dev/null
+++ b/bin/test_buildd_recipe
@@ -0,0 +1,60 @@
+#!/usr/bin/python3
+# Copyright 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+#
+# This is a script to do end-to-end testing of the buildd with a bzr-builder
+# recipe, without involving the BuilderBehaviour.
+
+import sys
+from xmlrpc.client import ServerProxy
+
+country_code = "us"
+apt_cacher_ng_host = "stumpy"
+distroseries_name = "maverick"
+recipe_text = """# bzr-builder format 0.2 deb-version {debupstream}-0~{revno}
+http://bazaar.launchpad.dev/~ppa-user/+junk/wakeonlan"""
+
+
+def deb_line(host, suites):
+    prefix = "deb http://"
+    if apt_cacher_ng_host is not None:
+        prefix += "%s:3142/" % apt_cacher_ng_host
+    return f"{prefix}{host} {distroseries_name} {suites}"
+
+
+proxy = ServerProxy("http://localhost:8221/rpc")
+print(proxy.echo("Hello World"))
+print(proxy.info())
+print(proxy.proxy_info())
+status = proxy.status()
+print(status)
+if status[0] != "BuilderStatus.IDLE":
+    print("Aborting due to non-IDLE builder.")
+    sys.exit(1)
+print(
+    proxy.build(
+        "1-2",
+        "sourcepackagerecipe",
+        "1ef177161c3cb073e66bf1550931c6fbaa0a94b0",
+        {},
+        {
+            "author_name": "Steve\u1234",
+            "author_email": "stevea@xxxxxxxxxxx",
+            "suite": distroseries_name,
+            "distroseries_name": distroseries_name,
+            "ogrecomponent": "universe",
+            "archive_purpose": "puppies",
+            "recipe_text": recipe_text,
+            "archives": [
+                deb_line(
+                    "%s.archive.ubuntu.com/ubuntu" % country_code,
+                    "main universe",
+                ),
+                deb_line(
+                    "ppa.launchpad.net/launchpad/bzr-builder-dev/ubuntu",
+                    "main",
+                ),
+            ],
+        },
+    )
+)
diff --git a/buildd-genconfig b/buildd-genconfig
new file mode 100755
index 0000000..f991f6e
--- /dev/null
+++ b/buildd-genconfig
@@ -0,0 +1,80 @@
+#!/usr/bin/python3
+#
+# Copyright 2009 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os
+from optparse import OptionParser
+
+archtag = os.popen("dpkg --print-architecture").read().strip()
+
+parser = OptionParser()
+parser.add_option(
+    "-n",
+    "--name",
+    dest="NAME",
+    help="the name for this buildd",
+    metavar="NAME",
+    default="default",
+)
+
+parser.add_option(
+    "-H",
+    "--host",
+    dest="BINDHOST",
+    help="the IP/host this buildd binds to",
+    metavar="HOSTNAME",
+    default="localhost",
+)
+
+parser.add_option(
+    "-p",
+    "--port",
+    dest="BINDPORT",
+    help="the port this buildd binds to",
+    metavar="PORT",
+    default="8221",
+)
+
+parser.add_option(
+    "-a",
+    "--arch",
+    dest="ARCHTAG",
+    help="the arch tag this buildd claims",
+    metavar="ARCHTAG",
+    default=archtag,
+)
+
+parser.add_option(
+    "-t",
+    "--template",
+    dest="TEMPLATE",
+    help="the template file to use",
+    metavar="FILE",
+    default="/usr/share/launchpad-buildd/template-buildd.conf",
+)
+
+parser.add_option(
+    "--proxy-port",
+    dest="PROXYPORT",
+    help="the port the local builder proxy binds to",
+    metavar="PORT",
+    default="8222",
+)
+
+(options, args) = parser.parse_args()
+
+template = open(options.TEMPLATE).read()
+
+replacements = {
+    "@NAME@": options.NAME,
+    "@BINDHOST@": options.BINDHOST,
+    "@ARCHTAG@": options.ARCHTAG,
+    "@BINDPORT@": options.BINDPORT,
+    "@PROXYPORT@": options.PROXYPORT,
+}
+
+for replacement_key in replacements:
+    template = template.replace(replacement_key, replacements[replacement_key])
+
+print(template.strip())
diff --git a/charm/Makefile b/charm/Makefile
new file mode 100644
index 0000000..d992cd0
--- /dev/null
+++ b/charm/Makefile
@@ -0,0 +1,23 @@
+NAME ?= launchpad-buildd
+CHARM_SERIES ?= 20.04
+ARCH := $(shell dpkg --print-architecture)
+CHARM = ./$(NAME)_ubuntu-$(CHARM_SERIES)-$(ARCH).charm
+
+
+build: $(CHARM)
+
+$(CHARM):
+	charmcraft pack
+
+clean:
+	charmcraft clean
+	rm -rf $(CHARM)
+
+create-privileged-model:
+	juju add-model privileged localhost
+	lxc profile set juju-privileged security.privileged true
+
+deploy:
+	juju deploy $(CHARM)
+
+.PHONY: build clean create-privileged-model deploy
diff --git a/charm/README.md b/charm/README.md
new file mode 100644
index 0000000..6e268de
--- /dev/null
+++ b/charm/README.md
@@ -0,0 +1,50 @@
+# Overview
+
+This charm installs a Launchpad builder, which can build packages in
+response to requests from a Launchpad instance.  It is mainly intended for
+use by Launchpad developers testing changes to builder handling.
+
+# Setup
+
+Builders need to be able to unpack chroots, which involves being able to
+create device nodes.  Unprivileged LXD containers cannot do this.  If you
+want to use this with the LXD provider, you should therefore do this first:
+
+```
+make create-privileged-model
+```
+
+... or, if you need more control, some variation on this:
+
+```
+juju add-model privileged localhost
+lxc profile set juju-privileged security.privileged true
+```
+
+# Deployment
+
+```
+make deploy
+```
+
+This charm will deploy the launchpad-buildd package from a PPA.  If you want
+to deploy a modified version of launchpad-buildd, you can either build it
+locally and install the resulting packages manually after initial
+deployment, or you can upload a modified source package to your own PPA and
+set `install_sources` to refer to that PPA.
+
+Either way, this should eventually give you a running builder.  Find out its
+host name (e.g. `juju-XXXXXX-0.lxd`) and [add it to your local Launchpad
+instance](https://launchpad.test/builders/+new) (e.g.
+`http://juju-XXXXXX-0.lxd:8221/`).
+
+# Notes
+
+This charm gives you a non-virtualized builder, since there is no reset from
+a base image between builds; you'll need to make sure that any archives or
+snaps with builds you intend to dispatch to this builder have the "Require
+virtualized builders" option disabled.
+
+The Launchpad development wiki has [instructions on setting up the rest of
+Launchpad](https://dev.launchpad.net/Soyuz/HowToUseSoyuzLocally).
+You can skip the parts about installing the builder.
diff --git a/charm/charmcraft.yaml b/charm/charmcraft.yaml
new file mode 100644
index 0000000..644b524
--- /dev/null
+++ b/charm/charmcraft.yaml
@@ -0,0 +1,13 @@
+type: charm
+bases:
+  - build-on:
+      - name: ubuntu
+        channel: "20.04"
+    run-on:
+      - name: ubuntu
+        channel: "20.04"
+parts:
+  charm:
+    source: .
+    plugin: reactive
+    build-snaps: [charm]
diff --git a/charm/config.yaml b/charm/config.yaml
new file mode 100644
index 0000000..c3a9340
--- /dev/null
+++ b/charm/config.yaml
@@ -0,0 +1,3 @@
+options:
+    install_sources:
+        default: ppa:launchpad/buildd-staging
diff --git a/charm/layer.yaml b/charm/layer.yaml
new file mode 100644
index 0000000..9867966
--- /dev/null
+++ b/charm/layer.yaml
@@ -0,0 +1,14 @@
+repo: lp:launchpad-buildd
+includes:
+    - layer:basic
+    - layer:apt
+options:
+    apt:
+        packages:
+            - bzr-builder
+            - git-build-recipe
+            - launchpad-buildd
+            - quilt
+ignore:
+    - dist
+    - tmp
diff --git a/charm/metadata.yaml b/charm/metadata.yaml
new file mode 100644
index 0000000..97f39f4
--- /dev/null
+++ b/charm/metadata.yaml
@@ -0,0 +1,12 @@
+name: launchpad-buildd
+summary: Launchpad builder
+description: |
+    A system that can build packages in response to requests from a
+    Launchpad instance.
+tags:
+    - application_development
+maintainer: Colin Watson <cjwatson@xxxxxxxxxxxxx>
+subordinate: false
+series:
+    - focal
+min-juju-version: 2.0.0
diff --git a/charm/reactive/launchpad-buildd.py b/charm/reactive/launchpad-buildd.py
new file mode 100644
index 0000000..c2ada48
--- /dev/null
+++ b/charm/reactive/launchpad-buildd.py
@@ -0,0 +1,51 @@
+# Copyright 2016-2022 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os.path
+import re
+
+from charms.apt import status_set
+from charms.reactive import (
+    hook,
+    only_once,
+    remove_state,
+    set_state,
+    when,
+    when_not,
+)
+
+
+@only_once
+def install():
+    with open("/etc/default/launchpad-buildd", "w") as default_file:
+        print("RUN_NETWORK_REQUESTS_AS_ROOT=yes", file=default_file)
+    remove_state("launchpad-buildd.installed")
+
+
+@hook("upgrade-charm", "config-changed")
+def mark_needs_install():
+    remove_state("launchpad-buildd.installed")
+
+
+@when("apt.installed.launchpad-buildd")
+@when_not("launchpad-buildd.installed")
+def configure_launchpad_buildd():
+    # ntp.buildd isn't likely to work outside of the Canonical datacentre,
+    # and LXD containers can't set the system time.  Let's just not worry
+    # about NTP.
+    config_path = "/etc/launchpad-buildd/default"
+    with open(config_path) as config_file:
+        config = config_file.read()
+    config = re.sub(r"^ntphost = .*", "ntphost = ", config, flags=re.M)
+    with open(config_path + ".new", "w") as new_config_file:
+        new_config_file.write(config)
+    os.rename(config_path + ".new", config_path)
+    set_state("launchpad-buildd.installed")
+
+
+@when("apt.installed.bzr-builder")
+@when("apt.installed.git-build-recipe")
+@when("apt.installed.quilt")
+@when("launchpad-buildd.installed")
+def mark_active():
+    status_set("active", "Builder running")
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..cc5ceb1
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,2334 @@
+launchpad-buildd (239) focal; urgency=medium
+
+  * Restart snapd after certificate installation and proxy initialization.
+
+ -- Simone Pelosi <simone.pelosi@xxxxxxxxxxxxx>  Wed, 12 Jun 2024 16:28:26 +0200
+
+launchpad-buildd (238) focal; urgency=medium
+
+  [ Simone Pelosi ]
+  * Add snapd proxy configuration.
+
+  [ Inês Almeida ]
+  * Update fetch service token revocation to send token in payload
+    instead of auth headers.
+
+ -- Simone Pelosi <simone.pelosi@xxxxxxxxxxxxx>  Fri, 07 Jun 2024 17:05:35 +0200
+
+launchpad-buildd (237) focal; urgency=medium
+
+  [ Simone Pelosi ]
+  * Improve documentation for qastaging deployment.
+
+  [ Inês Almeida ]
+  * Add logic to allow using fetch-service as the builder proxy for snaps that
+    have the `use_fetch_service` flag on.
+    Update token revocation authentication when using the fetch service.
+    Install mitm-certificates that are now injected from the buildd-manager.
+  * Improve documentation for qastaging deployment.
+  * Add `proxy_info()` xmlrpc endpoint that can be used to retrieve proxy
+    details from a builder.
+
+ -- Inês Almeida <ines.almeida@xxxxxxxxxxxxx>  Wed, 24 Apr 2024 13:20:40 +0200
+
+launchpad-buildd (236) focal; urgency=medium
+
+  [ Colin Watson ]
+  * Add lxc.cgroup2.* configuration, for compatibility with future
+    environments where we use unified cgroup2.  (However, we should keep
+    using systemd.unified_cgroup_hierarchy=false until we drop support for
+    building xenial; see
+    https://bugs.launchpad.net/ubuntu/xenial/+source/systemd/+bug/1962332.)
+  * Update deployment docs: riscv64 builders are no longer managed
+    separately.
+  * Document deployment to qastaging in place of dogfood.
+
+  [Simone Pelosi]
+  * Add support for snap components.
+    Snap components are already built by our build jobs but not 
+    added to `built files`. 
+    Add `.comp` filetype to the filter in `gatherResults` function
+    to process components correctly. 
+    Snap components and `.comp` file type reference: SD149
+
+ -- Simone Pelosi <simone.pelosi@xxxxxxxxxxxxx>  Fri, 16 Feb 2024 10:21:07 +0100
+
+launchpad-buildd (235) focal; urgency=medium
+
+  * sourcepackagerecipe: Create /home/buildd inside the chroot if it doesn't
+    already exist.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 18 Sep 2023 18:05:25 +0100
+
+launchpad-buildd (234) focal; urgency=medium
+
+  [ Colin Watson ]
+  * Add basic documentation of malware scanning for CI builds.
+  * Remove restrictions on core snap names; we now install snaps
+    corresponding to whatever Launchpad sends.
+
+  [ Dimitri John Ledkov ]
+  * buildsnap: refresh preinstalled snaps.
+
+  [ Jürgen Gmach ]
+  * Add additional information about production architecture.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 28 Jun 2023 14:55:35 +0100
+
+launchpad-buildd (233) focal; urgency=medium
+
+  * Only create /dev/dm-* in LXD containers if they don't already exist
+    (fixes devtmpfs-related failures on riscv64).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 01 Jun 2023 08:24:53 +0100
+
+launchpad-buildd (232) focal; urgency=medium
+
+  [ Colin Watson ]
+  * Drop dependency on apt-transport-https; HTTPS support is integrated into
+    apt these days.
+
+  [ Steve Langasek ]
+  * Mount devtmpfs in the lxd container, to make losetup -P work.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 24 May 2023 18:23:02 +0100
+
+launchpad-buildd (231) focal; urgency=medium
+
+  * Only ignore .bzr and .git when building source packages from recipes,
+    not all the things that "dpkg-buildpackage -I" ignores.
+  * Use lpci rather than lpcraft, following its rename.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 18 Apr 2023 18:05:50 +0100
+
+launchpad-buildd (230) focal; urgency=medium
+
+  * Apply black and isort.
+  * Enforce shellcheck in pre-commit.
+  * Add an option to disable the proxy after the "pull" phase of a snap
+    recipe build.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 10 Feb 2023 18:22:21 +0000
+
+launchpad-buildd (229) focal; urgency=medium
+
+  * Call dpkg-architecture with -a rather than setting DEB_HOST_ARCH, to
+    avoid a warning if gcc isn't installed.
+  * Prevent the kernel from killing launchpad-buildd if it runs out of
+    memory.
+  * Restructure lxd group membership handling to avoid requiring the lxd
+    snap to be installed at postinst time.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 31 Jan 2023 12:34:34 +0000
+
+launchpad-buildd (228) focal; urgency=medium
+
+  * In CI jobs, chown the VCS tree to buildd:buildd after fetching it, since
+    otherwise lpcraft running as the buildd user can't read its .git
+    directory.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 26 Jan 2023 15:06:30 +0000
+
+launchpad-buildd (227) focal; urgency=medium
+
+  * Tolerate receiving "builder_constraints": None.
+  * Check the appropriate server.key path for the LXD snap.
+  * Run lpcraft as the buildd user to allow nvidia.runtime=true to work.
+  * Create nvidia* devices in such a way that they can be used by nested
+    containers.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 26 Jan 2023 01:11:57 +0000
+
+launchpad-buildd (226) focal; urgency=medium
+
+  * Remove unused "distribution" argument from the binarypackage manager.
+    This was only used with the old internal sbuild fork, which we dropped
+    in launchpad-buildd 127.
+  * Remove the old "distroseries_name" argument from the sourcepackagerecipe
+    manager, which duplicated the common "series" argument.
+  * If the "gpu-nvidia" constraint is specified, then pass through an NVIDIA
+    GPU to the LXD container, and pass the "--gpu-nvidia" option to lpcraft.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 23 Jan 2023 17:42:51 +0000
+
+launchpad-buildd (225) focal; urgency=medium
+
+  * Fix add-trusted-keys regression due to Backend.open calling "lxc exec"
+    and draining stdin (LP: #1999420).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 13 Dec 2022 12:16:46 +0000
+
+launchpad-buildd (224) focal; urgency=medium
+
+  [ Colin Watson ]
+  * Allow configuring builders to use a different ClamAV database URL.
+  * Require the LXD snap to be installed, rather than depending on the lxd
+    package (which no longer exists in jammy).
+  * Calculate major number for device-mapper by searching /proc/devices; the
+    "dmsetup create" approach doesn't seem to work properly in jammy.
+
+  [ Andrey Fedoseev ]
+  * `open` method is added to the backends providing access to the files
+    in target environments via a file-like object.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 12 Dec 2022 12:20:03 +0000
+
+launchpad-buildd (223) focal; urgency=medium
+
+  * Add optional malware scanning at the end of CI build jobs, currently
+    implemented using clamav.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 07 Oct 2022 17:31:08 +0100
+
+launchpad-buildd (222) focal; urgency=medium
+
+  * Remove use of six.
+  * Fix handling of librarian macaroons, where the username is empty and we
+    must use prior authentication.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 20 Sep 2022 14:39:14 +0100
+
+launchpad-buildd (221) focal; urgency=medium
+
+  [ Andy Whitcroft ]
+  * Include Build-Depends-Arch when considering direct dependancies
+    (LP: #1988999)
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 08 Sep 2022 12:19:18 +0100
+
+launchpad-buildd (220) focal; urgency=medium
+
+  * Work around https://github.com/lxc/lxcfs/issues/553 by unmounting
+    /proc/cpuinfo in LXD-based armhf builds.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 26 Aug 2022 11:50:02 +0100
+
+launchpad-buildd (219) focal; urgency=medium
+
+  * Provide additional package repositories for CI builds rather than replacing
+    the base repositories.
+
+ -- Jürgen Gmach <juergen.gmach@xxxxxxxxxxxxx>  Fri, 19 Aug 2022 17:22:00 +0200
+
+launchpad-buildd (218) focal; urgency=medium
+
+  * Use a common output directory for all lpcraft jobs.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 04 Aug 2022 10:31:56 +0100
+
+launchpad-buildd (217) focal; urgency=medium
+
+  [ Colin Watson ]
+  * Improve deployment documentation.
+  * Make URL sanitization a little less greedy.
+
+  [ Jürgen Gmach ]
+  * Pass secrets via a YAML configuration file in order to prevent
+    credentials from being leaked in logfiles. 
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 29 Jul 2022 16:13:37 +0100
+
+launchpad-buildd (216) focal; urgency=medium
+
+  [ Andrey Fedoseev ]
+  * Allow specifying target architecture for snaps via
+    SNAPCRAFT_BUILD_FOR environment variable
+
+  [ Colin Watson ]
+  * Add a timeout when revoking proxy tokens.
+  * Log SHA-512 hash of built snaps (LP: #1979844).
+  * Revise ordering of deployment documentation.
+  * Gather *.debug from snap builds.
+
+  [ Jürgen Gmach ]
+  * Pass secrets to the CI runner.
+
+ -- Jürgen Gmach <juergen.gmach@xxxxxxxxxxxxx>  Wed, 13 Jul 2022 15:19:38 +0200
+
+launchpad-buildd (215) focal; urgency=medium
+
+  [ Jürgen Gmach ]
+  * Fix setup instructions for a development environment.
+  * Add support for plugin settings for CI builds.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 20 Jun 2022 10:12:05 +0100
+
+launchpad-buildd (214) focal; urgency=medium
+
+  * Pass apt repository and environment variable options to run-ci, not
+    run-ci-prepare (LP: #1977477).
+  * Tolerate ntphost being unset in postinst.
+  * Avoid using the deprecated apt-key (LP: #1938704).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 07 Jun 2022 22:13:42 +0100
+
+launchpad-buildd (213) focal; urgency=medium
+
+  [ Colin Watson ]
+  * Dynamically generate configuration file in lpbuildd.tests.harness.
+  * Remove unnecessary "slave" component from some file names.
+
+  [ Jürgen Gmach ]
+  * Pass environment variable and apt repository data to the CI runner.
+
+ -- Jürgen Gmach <juergen.gmach@xxxxxxxxxxxxx>  Thu, 19 May 2022 13:15:53 +0200
+
+launchpad-buildd (212) focal; urgency=medium
+
+  * Ensure that launchpad-buildd runs with lxd as a supplementary group.
+  * Fix use of shallow clones for OCI builds, which regressed in version 211
+    (LP: #1968630).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 12 Apr 2022 17:33:31 +0100
+
+launchpad-buildd (211) focal; urgency=medium
+
+  * Use "git checkout" rather than "git clone -b", since that allows
+    checking out by commit ID.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 05 Apr 2022 21:52:14 +0100
+
+launchpad-buildd (210) bionic; urgency=medium
+
+  * Stop building with dpkg-deb -Zgzip; we no longer need to install on
+    lucid.
+  * Make more loop device nodes available in LXD containers (LP: #1963706).
+  * Drop pre-Python-3.6 code using pyupgrade.
+  * Fix gathering the output of charm recipe builds that use --build-path.
+  * Convert daemon startup to systemd.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 17 Mar 2022 15:41:49 +0000
+
+launchpad-buildd (209) bionic; urgency=medium
+
+  * Upgrade charm to bionic and Python 3.
+  * Remove Python 2 support.
+  * Unversion python-debian requirement, since the minimum version required
+    was very old (pre-xenial) and the version of the packaged python-debian
+    confuses modern pip/setuptools (see
+    https://bugs.launchpad.net/bugs/1926870).
+  * Depend on dnsmasq-base.  The LXD backend needs this, but it's no longer
+    in cloud images as of focal.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 21 Feb 2022 17:11:33 +0000
+
+launchpad-buildd (208) bionic; urgency=medium
+
+  [ Jürgen Gmach ]
+  * Adjust updated command line option for the CI runner.
+  * Publish documentation on Read the Docs. 
+
+  [ Colin Watson ]
+  * Rename BUILDD_SLAVE_CONFIG environment variable to BUILDD_CONFIG.
+
+ -- Jürgen Gmach <juergen.gmach@xxxxxxxxxxxxx>  Wed, 09 Feb 2022 13:52:46 +0000
+
+launchpad-buildd (207) bionic; urgency=medium
+
+  * Return results from individual CI jobs.
+  * Support CI pipeline stages with multiple jobs.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 03 Feb 2022 10:40:58 +0000
+
+launchpad-buildd (206) bionic; urgency=medium
+
+  * Fix flake8 violations.
+  * Refactor extra status handling to be common to all build types.
+  * Fix handling of empty output in Backend.find.
+  * Add CI job support.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 11 Jan 2022 16:26:40 +0000
+
+launchpad-buildd (205) bionic; urgency=medium
+
+  * Ignore NotAutomatic flag for -proposed and -backports (LP: #1016776).
+  * Use brz-build-daily-recipe rather than "bzr -Derror dailydeb" if the
+    former exists (see LP #1943292).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 03 Dec 2021 23:15:48 +0000
+
+launchpad-buildd (204) bionic; urgency=medium
+
+  * Configure apt to automatically retry downloads on failures
+    (LP: #1949473).
+  * Configure apt to always include phased updates, even if running in LXD
+    containers (LP: #1949769).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 09 Nov 2021 21:50:17 +0000
+
+launchpad-buildd (203) bionic; urgency=medium
+
+  * Remove some more "slave" terminology from tests.
+  * Add SNAPPY_STORE_NO_CDN=1 to the environment in more places when using a
+    builder proxy (LP: #1945712).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 08 Oct 2021 14:45:06 +0100
+
+launchpad-buildd (202) bionic; urgency=medium
+
+  [ Thomas Bechtold ]
+  * Add git_shallow_clone option to vcs_fetch() and use it for OCI build
+    (LP: #1939392).
+
+  [ Ioana Lasc ]
+  * Gather dpkg.yaml for snap builds.
+
+ -- Ioana Lasc <ioana.lasc@xxxxxxxxxxxxx>  Tue, 21 Sep 2021 12:46:41 +0300
+
+launchpad-buildd (201) bionic; urgency=medium
+
+  * Sanitize non-user-bound macaroons in build logs.
+  * Switch to "charmcraft pack --destructive-mode", now that Charmcraft 1.2
+    is in the stable channel.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 01 Sep 2021 18:17:34 +0100
+
+launchpad-buildd (200) bionic; urgency=medium
+
+  * Add core22 to the list of supported core snap names.
+  * Rename "snap proxy" to "builder proxy", since it's used for several
+    different build types nowadays.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 16 Aug 2021 16:38:32 +0100
+
+launchpad-buildd (199) bionic; urgency=medium
+
+  * Upload oci layers to librarian as tar.gz
+
+ -- Ioana Lasc <ioana.lasc@xxxxxxxxxxxxx>  Tue, 03 Aug 2021 12:08:15 +0300
+
+launchpad-buildd (198) bionic; urgency=medium
+
+  * Run charmcraft in verbose mode.
+  * Honour proxy arguments when building charms.
+  * Install charmcraft with --classic.
+  * Run charmcraft in managed mode.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 20 Jul 2021 10:05:27 +0100
+
+launchpad-buildd (197) bionic; urgency=medium
+
+  [ Tom Wardill ]
+  * Add charm building support
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 14 Jun 2021 16:30:33 +0100
+
+launchpad-buildd (196) bionic; urgency=medium
+
+  * Handle symlinks in OCI image files
+
+ -- Tom Wardill <tom.wardill@xxxxxxxxxxxxx>  Mon, 17 May 2021 14:19:04 +0100
+
+launchpad-buildd (195) bionic; urgency=medium
+
+  * sbuild-package: Temporarily remove lxd group membership (LP: #1820348).
+  * Tolerate missing "apt-get indextargets" on trusty.
+  * Add a Juju charm which can be used to deploy local launchpad-buildd
+    instances.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 26 Mar 2021 08:54:22 +0000
+
+launchpad-buildd (194) bionic; urgency=medium
+
+  [ Colin Watson ]
+  * Stop setting $mailto in .sbuildrc, to work around LP #1859010.
+  * Stop overquoting OCI --build-arg options (LP: #1902007).
+  * Update production deployment documentation in README.
+
+  [ Tom Wardill ]
+  * Fix OCI builds that don't pull another image. 
+
+ -- Tom Wardill <tom.wardill@xxxxxxxxxxxxx>  Thu, 07 Jan 2021 10:02:27 +0000
+
+launchpad-buildd (193) bionic; urgency=medium
+
+  * Fix handling of bytes arguments passed to BuildManager.runSubProcess.
+  * Fix bytes/text handling in RecipeBuilder.buildTree.
+  * Fix check-implicit-pointer-functions on Python 3: build logs aren't
+    necessarily UTF-8, so process them as bytes (LP: #1897461).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 28 Sep 2020 13:18:50 +0100
+
+launchpad-buildd (192) bionic; urgency=medium
+
+  * Update Maintainer to launchpad-dev.
+  * Handle bytes in shell_escape, fixing recipe builds on Python 3.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 23 Sep 2020 23:49:35 +0100
+
+launchpad-buildd (191) bionic; urgency=medium
+
+  [ Colin Watson ]
+  * Fix version checks for deployments using Python 3.
+
+  [ Tom Wardill ]
+  * Fix proxy token revocation on Python 3.
+
+  [ Thiago F. Pappacena ]
+  * Add support for --build-arg when building OCI images (LP: #1892351).
+
+ -- Tom Wardill <tom.wardill@xxxxxxxxxxxxx>  Thu, 17 Sep 2020 16:08:58 +0100
+
+launchpad-buildd (190) bionic; urgency=medium
+
+  [ Colin Watson ]
+  * Switch to git; add Vcs-* fields.
+  * Always call Resource.putChild with path as bytes.
+  * Switch RotatableFileLogObserver from class advice to a class decorator.
+  * Work around /proc/self/fd-related test hang on Python >= 3.3.
+  * Adjust tests to avoid making assumptions about dict iteration order.
+  * Open temporary files in text mode in more cases.
+  * Ensure that regex patterns with \-escapes are raw strings.
+  * Adjust X-LXD-mode header construction for Python 3.
+  * Treat build logs as binary files.
+  * Treat build output files as binary files.
+  * Treat intltool-related files as binary files.
+  * Fix bytes/text handling in backends and their tests.
+  * Fix OCI tests on Python 3.
+  * Make buildrecipe compatible with Python 3.
+  * Fix get_arch_bits for Python 3.
+  * Skip tests involving daemonisation on Python 3 and Twisted < 16.4.0.
+  * Only include mock in tests_require for Python 2.
+  * Fix snap proxy testing for Python 3.
+  * Rename [slave] configuration section to [builder].
+  * Convert translation templates builds to the VCS mixin, thereby adding
+    git support.
+  * Add a proper wrapper for check-implicit-pointer-functions.
+  * Add a python3-lpbuildd binary package.
+  * Fix LXD.run to not default to universal_newlines=True.
+  * Run on Python 3 when built for >= bionic.
+  * Add some notes on the production deployment.
+  * lpbuildd/binarypackage.py: Use "apt-get indextargets" and "apt-helper
+    cat-file" where they exist to read Packages files, rather than looking
+    in /var/lib/apt/lists/ directly.
+  * Fix environment variable quoting in chroot backend (LP: #1884936).
+
+  [ Dimitri John Ledkov ]
+  * lxd: Add riscv64 to arch table.
+
+  [ Tom Wardill ]
+  * Improve error logging in OCI post-build 
+  * Add build_path directory context for OCI builds 
+
+ -- Tom Wardill <tom.wardill@xxxxxxxxxxxxx>  Wed, 19 Aug 2020 16:53:12 +0000
+
+launchpad-buildd (189) xenial; urgency=medium
+
+  * Fix closing tar files in OCI builds
+
+ -- Tom Wardill <tom.wardill@xxxxxxxxxxxxx>  Thu, 09 Apr 2020 10:38:47 +0000
+
+launchpad-buildd (188) xenial; urgency=medium
+
+  * Fix cwd for OCI builds, allows Dockerfile parameter.
+
+ -- Tom Wardill <tom.wardill@xxxxxxxxxxxxx>  Wed, 01 Apr 2020 14:02:00 +0000
+
+launchpad-buildd (187) xenial; urgency=medium
+
+  [ Colin Watson ]
+  * Make lpbuildd.snap compatible with Twisted >= 17.1.0, which drops
+    support for passing an unqualified port to strports.service.
+
+  [ Tom Wardill ]
+  * Prototype Docker image building support.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 18 Mar 2020 14:22:03 +0000
+
+launchpad-buildd (186) xenial; urgency=medium
+
+  * Fix sbuildrc compatibility with xenial's sbuild.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 06 Jan 2020 15:03:00 +0000
+
+launchpad-buildd (185) xenial; urgency=medium
+
+  [ Dimitri John Ledkov ]
+  * Stop installing ltsp-server for i386 livefs-builders. Edubuntu product
+    that used that is long dead, and the package itself is now removed
+    from focal.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 16 Dec 2019 21:47:28 +0000
+
+launchpad-buildd (184) xenial; urgency=medium
+
+  * Adjust sbuildrc to handle various changes in bionic's sbuild.
+  * Configure LXD when creating a target rather than in the postinst.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 12 Dec 2019 13:03:26 +0000
+
+launchpad-buildd (183) xenial; urgency=medium
+
+  [ Michael Hudson-Doyle ]
+  * Invoke dmsetup with sudo when calculating the device major number for
+    device mapper.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 26 Nov 2019 20:43:38 +0000
+
+launchpad-buildd (182) xenial; urgency=medium
+
+  [ Michael Hudson-Doyle ]
+  * Do not make assumptions about what device major number the device mapper
+    is using. (LP: #1852518)
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 26 Nov 2019 12:22:37 +0000
+
+launchpad-buildd (181) xenial; urgency=medium
+
+  [ Robert C Jennings ]
+  * LXD: Create dm-X devices to address kpartx hangs (LP: #1852518)
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 22 Nov 2019 11:14:04 +0000
+
+launchpad-buildd (180) xenial; urgency=medium
+
+  * No-change release to work around production upgrade problem.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 06 Nov 2019 11:11:43 +0000
+
+launchpad-buildd (179) xenial; urgency=medium
+
+  [ Dimitri John Ledkov ]
+  * Add core20 to the list of supported core_snap_names. LP: #1849687
+
+  [ Colin Watson ]
+  * Simplify most conditional Python 2/3 imports using six.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 30 Oct 2019 14:26:42 +0000
+
+launchpad-buildd (178) xenial; urgency=medium
+
+  * Accept an "extra_snaps" entry in livefs arguments, which is passed to
+    livecd-rootfs to configure ubuntu-image to include additional snaps.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 02 Oct 2019 12:59:03 +0100
+
+launchpad-buildd (177) xenial; urgency=medium
+
+  * Fix recipe building to not rely on /CurrentlyBuilding existing in base
+    images (LP: #1841075).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 23 Aug 2019 10:57:13 +0100
+
+launchpad-buildd (176) xenial; urgency=medium
+
+  * Don't rely on /CurrentlyBuilding existing in base images.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 18 Jun 2019 17:53:55 +0100
+
+launchpad-buildd (175) xenial; urgency=medium
+
+  * Allow configuring APT or snap store proxies via a new [proxy]
+    configuration file section.
+  * Encode non-bytes subprocess arguments on Python 2 to avoid crashing on
+    non-ASCII file names under LC_CTYPE=C (LP: #1832072).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 10 Jun 2019 14:00:31 +0100
+
+launchpad-buildd (174) xenial; urgency=medium
+
+  * Fix a missing piece from the changes in launchpad-buildd 168 that were
+    intended to allow channel selection for core16 and core18.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 23 May 2019 17:23:26 +0100
+
+launchpad-buildd (173) xenial; urgency=medium
+
+  [ Matias Bordese ]
+  * Updated build-request-id tag to set lp prefix.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 08 May 2019 17:14:37 +0100
+
+launchpad-buildd (172) xenial; urgency=medium
+
+  * lpbuildd.snap: Pass build_request_id and build_request_timestamp through
+    to the backend.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 08 May 2019 11:45:34 +0100
+
+launchpad-buildd (171) xenial; urgency=medium
+
+  [ Matias Bordese ]
+  * Updated build request fields in SNAPCRAFT_IMAGE_INFO to use dash as
+    separator.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 26 Apr 2019 14:24:36 +0100
+
+launchpad-buildd (170) xenial; urgency=medium
+
+  [ Matias Bordese ]
+  * Add build_request_id and build_request_timestamp to SNAPCRAFT_IMAGE_INFO
+    if provided.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 25 Apr 2019 15:24:11 +0100
+
+launchpad-buildd (169) xenial; urgency=medium
+
+  * Fix test failure in TestSnapBuildManagerIteration.test_iterate_private.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 12 Mar 2019 16:42:52 +0000
+
+launchpad-buildd (168) xenial; urgency=medium
+
+  * Remove obsolete chroot-building instructions from README.
+  * Rename slave-prep to builder-prep.
+  * Generalise snap channel handling slightly, allowing channel selection
+    for core16 and core18.
+  * Move /usr/share/launchpad-buildd/slavebin to
+    /usr/share/launchpad-buildd/bin.
+  * Rename BuildDSlave to Builder and XMLRPCBuildDSlave to XMLRPCBuilder.
+  * Rename FakeSlave to FakeBuilder.
+  * Don't set SNAPCRAFT_BUILD_INFO=1 when building private snaps
+    (LP: #1639975).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 12 Mar 2019 16:09:06 +0000
+
+launchpad-buildd (167) xenial; urgency=medium
+
+  * Allow the LXD backend to accept a LXD image instead of a chroot tarball,
+    skipping the conversion step (LP: #1811677).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 07 Feb 2019 12:39:08 +0000
+
+launchpad-buildd (166) xenial; urgency=medium
+
+  [ Colin Watson ]
+  * Run all tests at package build time, not just those in lpbuildd.tests.
+
+  [ Tobias Koch ]
+  * Update LXD backend to work with LXD 3.
+  * buildlivefs: support passing a REPO_SNAPSHOT_STAMP variable into the
+    environment, used to generate images with identical version of Debian
+    packages during parallelized image builds.
+  * buildlivefs: support passing a COHORT_KEY variable into the
+    environment, used to pre-seed images with identical versions of
+    snaps during parallelized image builds.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 28 Jan 2019 13:10:27 +0000
+
+launchpad-buildd (165) xenial; urgency=medium
+
+  [ Steve Langasek, Tobias Koch ]
+  * buildlivefs: support passing an IMAGE_TARGETS variable into the
+    environment, for projects (i.e., ubuntu-cpc) that produce multiple
+    images as part of a single build, in order to be selective about what
+    builds to run.
+
+  [ Colin Watson ]
+  * Set SNAPCRAFT_BUILD_ENVIRONMENT=host when building snaps (LP: #1791201).
+  * Call gatherResults in a different thread so that it doesn't block
+    responses to XML-RPC requests (LP: #1795877).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 19 Oct 2018 08:10:22 +0100
+
+launchpad-buildd (164) xenial; urgency=medium
+
+  * Configure snap proxy settings for Subversion (LP: #1668358).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 16 Aug 2018 16:32:41 +0100
+
+launchpad-buildd (163) xenial; urgency=medium
+
+  * Revert change to tolerate chroot tarballs with a top-level directory
+    other than chroot-autobuild/; this breaks symlink unpacking.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 13 Jun 2018 00:23:35 +0100
+
+launchpad-buildd (162) xenial; urgency=medium
+
+  * Set the hostname and FQDN of LXD containers to match the host system,
+    though with an IP address pointing to the container (LP: #1747015).
+  * Tolerate chroot tarballs with a top-level directory other than
+    chroot-autobuild/.
+  * If the extra build arguments include fast_cleanup: True, then skip the
+    final cleanup steps of the build.  This can be used when building in a
+    VM that is guaranteed to be torn down after the build.
+  * Refactor VCS operations from lpbuildd.target.build_snap out to a module
+    that can be used by other targets.
+  * Allow checking out a git tag rather than a branch (LP: #1687078).
+  * Add a local unauthenticated proxy on port 8222, which proxies through to
+    the remote authenticated proxy.  This should allow running a wider range
+    of network clients, since some of them apparently don't support
+    authenticated proxies very well (LP: #1690834, #1753340).
+  * Run tar with correct working directory when building source tarballs for
+    snaps.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 12 Jun 2018 08:50:40 +0100
+
+launchpad-buildd (161) xenial; urgency=medium
+
+  * Pass build URL to snapcraft using SNAPCRAFT_IMAGE_INFO.
+  * Add an option to generate source tarballs for snaps after pulling
+    external dependencies (LP: #1763639).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 23 Apr 2018 09:44:49 +0100
+
+launchpad-buildd (160) xenial; urgency=medium
+
+  * Install sudo if installing snapcraft as a snap, since in that case
+    snapcraft's usual dependency on it is ineffective.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 23 Mar 2018 22:43:38 +0000
+
+launchpad-buildd (159) xenial; urgency=medium
+
+  * Allow all snapd services in the policy-rc.d we install in LXD
+    containers; for better or worse, snapd.postinst (via deb-systemd-invoke)
+    won't start any of them, including snapd itself, if any of them is
+    forbidden.  Mask snapd.refresh.timer so that it doesn't cause trouble.
+  * Allow optionally installing snapcraft as a snap (LP: #1737994).
+  * If Launchpad passes a build_url item in the extra build arguments, then
+    emit it at the start of the log.
+  * Make buildd-slave.tac compatible with Twisted >= 17.1.0, which drops
+    support for passing an unqualified port to strports.service.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 23 Mar 2018 07:53:24 +0000
+
+launchpad-buildd (158) xenial; urgency=medium
+
+  [ Steve Langasek ]
+  * Support passing a snap channel into a livefs build through the
+    environment.
+
+  [ Christopher Glass ]
+  * Add support for passing apt proxies to live-build.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 15 Jan 2018 10:04:42 +0000
+
+launchpad-buildd (157) xenial; urgency=medium
+
+  [ Colin Watson ]
+  * Normalise Python packaging.  We now install our modules on the normal
+    system path, using pybuild.  setup.py now installs buildd-slave.tac in
+    the lpbuildd package rather than data_files in order not to pollute the
+    top level of a virtualenv.
+  * Fall back to the package name from AC_INIT when expanding $(PACKAGE) in
+    translation configuration files if no other definition can be found.
+  * Set SNAPCRAFT_BUILD_INFO=1 to tell snapcraft to generate a manifest.
+
+  [ William Grant ]
+  * Fix inclusion of buildd-slave.tac in MANIFEST.in.
+  * Fix check-implicit-pointer-functions symlink for new python-lpbuildd path.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 29 Nov 2017 14:16:04 +0000
+
+launchpad-buildd (156) xenial; urgency=medium
+
+  * Remove useless cwd argument passed to subprocess.check_call from
+    Chroot.run.
+  * Replace shell_escape function with shlex.quote (Python 3) or pipes.quote
+    (Python 2).
+  * Fix handling of null/empty-domain case in generate_pots.
+  * Make Backend.run(cwd=) work, and refactor BuildLiveFS and BuildSnap to
+    use it.  This fixes translation templates builds, which were assuming
+    that this worked.
+  * Remove executable bit from
+    lpbuildd/target/generate_translation_templates.py.
+  * Grant mac_admin and mac_override capabilities to LXD containers.  These
+    are needed to load AppArmor profiles when installing the core snap
+    (LP: #1730376).
+  * Explicitly install udev when building snaps or livefses, to work around
+    LP #1731519.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 13 Nov 2017 16:11:17 +0000
+
+launchpad-buildd (155) xenial; urgency=medium
+
+  * Refactor lpbuildd.pottery.intltool to avoid calling chdir.
+  * Merge TranslationTemplatesBuildState.{INSTALL,GENERATE} into a single
+    state.
+  * Convert generate-translation-templates to the new Operation framework.
+  * Use Python 3-style print functions.
+  * Make urllib imports Python 3-compatible.
+  * Make xmlrpc imports Python 3-compatible.
+  * Make configparser imports Python 3-compatible.
+  * Handle dict API changes in Python 3.
+  * Raise more useful exceptions when LXD.copy_in or LXD.copy_out fail.
+  * Make Backend.run implementations print command output if echo and
+    get_output are both true.
+  * Make Backend.is_package_available handle the case where the requested
+    package name is purely virtual (LP: #1732511).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 02 Nov 2017 11:42:24 +0000
+
+launchpad-buildd (154) xenial; urgency=medium
+
+  * The previous patch was labouring under mistaken assumptions: it's
+    actually the mounted-dev Upstart job that we race with in trusty
+    containers, so neuter that instead.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 19 Oct 2017 10:29:30 +0100
+
+launchpad-buildd (153) xenial; urgency=medium
+
+  * Defend against racing with udev to create loop devices in trusty
+    containers (LP: #1723216).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 18 Oct 2017 07:57:32 +0100
+
+launchpad-buildd (152) xenial; urgency=medium
+
+  [ Colin Watson ]
+  * Accept a "debug" entry in livefs arguments, which enables detailed
+    live-build debugging.
+  * Set SHELL=/bin/sh in snap builds, since it was previously passed through
+    by the chroot backend and some build systems expect SHELL to be set
+    (LP: #1716739).
+
+  [ Robert C Jennings ]
+  * LXD: Do not drop sys_rawio capability (LP: #1716060).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 13 Sep 2017 14:51:48 +0100
+
+launchpad-buildd (151) xenial; urgency=medium
+
+  * Run snapd with SNAPPY_STORE_NO_CDN=1, since the buildd network isn't
+    allowed to talk to the CDN.
+  * Create loop devices in LXD containers manually using mknod rather than
+    as LXD-managed devices, since the latter involves bind-mounting
+    individual devices which confuses some livecd-rootfs scripts.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 08 Sep 2017 01:41:20 +0100
+
+launchpad-buildd (150) xenial; urgency=medium
+
+  * Tell LXD to disable seccomp on powerpc, since it doesn't work there on
+    Linux 4.4.
+  * Make loop devices available to LXD containers.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 06 Sep 2017 12:29:38 +0100
+
+launchpad-buildd (149) xenial; urgency=medium
+
+  * Clamp the TCP MSS on the LXD bridge interface to the path MTU, to avoid
+    problems in environments where the path MTU is lower than 1500.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 01 Sep 2017 13:51:38 +0100
+
+launchpad-buildd (148) xenial; urgency=medium
+
+  * Move the contents of /usr/share/launchpad-buildd/slavebin/ into bin/ in
+    the source package, to keep things a bit more organised.
+  * Run tests at build time, now that python-txfixtures is in
+    ppa:launchpad/ubuntu/ppa (and >= zesty).
+  * Drop qemu emulation support.  It was quite unreliable, and we've had
+    real hardware for a while.
+  * Remove most architecture hardcoding from lpbuildd.util, relying on
+    dpkg-architecture instead.
+  * Use bzr's command-line interface rather than bzrlib, to ease porting to
+    Python 3.
+  * Rewrite update-debian-chroot in Python, allowing it to use lpbuildd.util
+    and to have unit tests.
+  * Rewrite override-sources-list in Python, allowing it to have unit tests.
+  * Rewrite add-trusted-keys in Python, allowing it to have unit tests.
+  * Configure sbuild to use schroot sessions rather than sudo.
+  * Rewrite unpack-chroot and remove-build in Python, allowing them to have
+    unit tests.
+  * Rewrite mount-chroot and umount-chroot in Python, allowing them to have
+    unit tests.
+  * Rewrite scan-for-processes in Python, allowing it to have unit tests.
+  * Improve "RUN:" log messages to be copy-and-pasteable as shell commands,
+    which is sometimes useful while debugging.
+  * Convert buildlivefs to the new Operation framework and add unit tests.
+  * Convert buildsnap to the new Operation framework and add unit tests.
+  * Add a LXD backend.
+  * Switch snap builds to the LXD backend.
+  * Switch livefs builds to the LXD backend.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 30 Aug 2017 15:18:41 +0100
+
+launchpad-buildd (147) xenial; urgency=medium
+
+  * Revert change to run snapcraft as non-root with passwordless sudo; this
+    broke "type: os" and "type: kernel" snap builds, and requires more
+    thought.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 25 Jul 2017 13:48:10 +0100
+
+launchpad-buildd (146) xenial; urgency=medium
+
+  * buildsnap: Initialise git submodules (LP: #1694413).
+  * Run snapcraft as non-root with passwordless sudo, since we run into
+    buggy corner cases in some plugins when running as root (LP: #1702656).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 18 Jul 2017 17:03:36 +0100
+
+launchpad-buildd (145) xenial; urgency=medium
+
+  * buildrecipe: Explicitly mark the local apt archive as trusted
+    (LP: #1701826).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 03 Jul 2017 15:03:51 +0100
+
+launchpad-buildd (144) xenial; urgency=medium
+
+  * buildsnap: Fix revision_id computation to handle the case where
+    --git-path is not passed.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 15 May 2017 21:45:16 +0100
+
+launchpad-buildd (143) xenial; urgency=medium
+
+  * Record the branch revision used to build a snap and return it along with
+    other XML-RPC status information (LP: #1679157).
+  * Write out trusted keys sent by buildd-manager (LP: #1626739).
+  * Add tests for lpbuildd.pottery, extracted from Launchpad.
+  * Configure a git:// proxy for snap builds (LP: #1663920).
+  * buildsnap: If --git-repository is passed but --git-path is not, build
+    the default branch of the repository (LP: #1688224).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 12 May 2017 16:47:57 +0100
+
+launchpad-buildd (142) trusty; urgency=medium
+
+  * lpbuildd.binarypackage: Pass DEB_BUILD_OPTIONS=noautodbgsym if we have
+    not been told to build debug symbols (LP: #1623256).
+  * debian/upgrade-config, lpbuildd.slave: Drop compatibility with ancient
+    pre-lucid versions of python-apt.
+  * lpbuildd.pottery.intltool: Remove unused and Python-3-unfriendly
+    string/file conditional from ConfigFile.__init__.
+  * Use Python-3-compatible forms of "print" and "except".
+  * buildsnap: Set SNAPCRAFT_SETUP_CORE=1 during pull phase so that
+    snapcraft will fetch and unpack the core snap for classic confinement
+    when necessary (LP: #1650946).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 10 Feb 2017 14:53:43 +0000
+
+launchpad-buildd (141) trusty; urgency=medium
+
+  * buildsnap: Grant access to the proxy during the build phase as well as
+    during the pull phase (LP: #1642281).
+  * buildsnap: Grant access to the proxy during the repo phase, allowing the
+    base branch to be fetched from an external site.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 05 Dec 2016 19:05:50 +0000
+
+launchpad-buildd (140) trusty; urgency=medium
+
+  * buildsnap: Catch urllib2.URLError as well as urllib2.HTTPError when
+    trying to revoke the proxy token (LP: #1610916).
+  * lpbuildd.snap: Upload *.manifest files as well as *.snap (LP: #1608432).
+  * buildsnap: Set https_proxy to an http:// URL rather than https://; the
+    former is more accurate anyway and the latter breaks npm (LP: #1588870).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 16 Sep 2016 17:39:12 +0100
+
+launchpad-buildd (139) trusty; urgency=medium
+
+  * buildsnap: Set LANG=C.UTF-8 when running snapcraft.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 05 Apr 2016 16:11:01 +0100
+
+launchpad-buildd (138) trusty; urgency=medium
+
+  [ Colin Watson ]
+  * slave-prep: Output current versions of git-build-recipe, git, and
+    qemu-user-static.
+  * Always raise exception instances rather than using the two-argument form
+    of raise.
+  * buildsnap: Run just "snapcraft" rather than "snapcraft all"; works with
+    snapcraft << 2.0 and >= 2.3.
+  * sbuild-package: Default LANG/LC_ALL to C.UTF-8 (LP: #1552791).
+
+  [ Kit Randel ]
+  * Add http/s proxy support for snap builds.
+  * Refactor buildsnap into distinct repo, pull, build and proxy token
+    revocation phases.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 03 Mar 2016 15:44:12 -0300
+
+launchpad-buildd (137) trusty; urgency=medium
+
+  * Remove handling for pre-karmic versions of Twisted that didn't support
+    --umask.
+  * Stop cleaning /var/log/launchpad-buildd/ from cron; logrotate handles
+    that now.
+  * Configure systemd-timesyncd to use the NTP server configured in
+    /etc/launchpad-buildd/default.
+  * Try to load the nbd module when starting launchpad-buildd
+    (LP: #1531171).
+  * buildrecipe: Add option parsing framework.
+  * Use git-build-recipe to run git-based recipe builds (LP: #1453022).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 18 Jan 2016 11:50:05 +0000
+
+launchpad-buildd (136) trusty; urgency=medium
+
+  * Use twisted.python.log.msg rather than print to write to the log file.
+    Twisted 15.2's stdio wrapper tries to decode as ASCII and thus breaks on
+    sbuild's UTF-8 section markers, and this seems to be the simplest way to
+    fix that while preserving source compatibility with earlier versions of
+    Twisted.
+  * Add Python packaging files so that Launchpad's test suite can
+    incorporate this as a Python dependency rather than requiring
+    python-lpbuildd to be installed on the test system.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 13 Nov 2015 13:59:48 +0000
+
+launchpad-buildd (135) trusty; urgency=medium
+
+  * debian/control: Require python-debian (>= 0.1.23), needed for changes in
+    launchpad-buildd 133.
+  * debian/control: Drop dependency on linux32.  It's been in util-linux for
+    ages, which is Essential, and utopic dropped the Provides.
+  * debian/launchpad-buildd.init: Set "Should-Start: cloud-init" to ensure
+    that launchpad-buildd is started after the hostname is set.
+  * Simplify BuilddSlaveTestSetup slightly.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 29 Oct 2015 17:49:57 +0000
+
+launchpad-buildd (134) trusty; urgency=medium
+
+  * buildsnap: Drop explicit installation of sudo, now fixed in snapcraft.
+  * Rewrite debian/rules in modern dh style.
+  * buildsnap: Pass SNAPCRAFT_LOCAL_SOURCES=1 to snapcraft so that it uses
+    the build's sources.list.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 24 Sep 2015 10:20:13 +0100
+
+launchpad-buildd (133) trusty; urgency=medium
+
+  * Liberalise dep-wait matching regexes slightly so that they match
+    multi-line output properly, as in the case where multiple
+    build-dependencies are uninstallable.
+  * If there is a mix of definite and dubious dep-wait output, then analyse
+    the situation rather than trusting just the definite information.
+  * Handle architecture restrictions, architecture qualifications, and
+    restriction formulas (build profiles) in build-dependencies.
+  * Add support for building snaps (LP: #1476405).
+  * slave-prep: Output current python-debian version, useful for debugging
+    build-dependency parsing problems.
+  * Strip qualifications and restrictions even from dep-waits derived solely
+    from sbuild output.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 04 Aug 2015 22:58:47 +0100
+
+launchpad-buildd (132) trusty; urgency=medium
+
+  * Fix incorrect dscpath construction that caused a crash when analysing
+    dubious dep-wait cases.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 15 Jul 2015 12:09:08 +0100
+
+launchpad-buildd (131) trusty; urgency=medium
+
+  * slave-prep: Output current sbuild version, now that it's a separate
+    package.
+  * buildrecipe: Pass --only-source to "apt-get build-dep" to force it to
+    use the source package we care about rather than trying to map through
+    binary package names.
+  * Make sbuild use "sudo -E" rather than just sudo.  It will still filter
+    the environment itself, but this means that variables such as
+    DEB_BUILD_OPTIONS will be passed through given our standard buildd
+    sudoers configuration.
+  * Analyse dubious dep-wait cases ("but it is not installed" or "but it is
+    not going to be installed") manually to check whether any direct
+    build-dependencies are missing, and if so generate an appropriate
+    dep-wait (LP: #1468755).
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 13 Jul 2015 14:16:11 +0100
+
+launchpad-buildd (130) trusty; urgency=medium
+
+  * Reimplement build-dependency installation for recipes by hand using
+    sbuild-like logic, allowing us to drop use of pbuilder (LP: #728494) and
+    support :native in recipe build-dependencies (LP: #1322294).
+  * Stop cleaning /home/buildd/public_html/ddebs/ and
+    /home/buildd/public_html/translations/, now that we're using an sbuild
+    that doesn't publish anything there.
+  * Drop apache2 dependency, which was only needed for old-style
+    ddeb/translation publishing.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 29 Jun 2015 17:56:52 +0100
+
+launchpad-buildd (129) trusty; urgency=low
+
+  [ William Grant ]
+  * Tighten apt depwait parsing to not return uninstallable deps as missing.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 04 Jun 2015 11:42:13 +0100
+
+launchpad-buildd (128) trusty; urgency=medium
+
+  [ Colin Watson ]
+  * Use sbuild's --resolve-alternatives mode, to match the behaviour of the
+    old internal sbuild fork.
+
+ -- William Grant <wgrant@xxxxxxxxxx>  Tue, 26 May 2015 11:27:26 +1000
+
+launchpad-buildd (127) trusty; urgency=low
+
+  [ William Grant ]
+  * Switch from an internal sbuild fork to the system package.
+    - Dropped copy of sbuild.
+    - Install ~buildd/.sbuildrc instead of ~buildd/.lp-sbuildrc.
+    - Clean and update sbuildrc.
+    - Write out /CurrentlyBuilding from Python.
+    - Rewrite failure stage and depwait detection to cope with modern sbuild.
+  * Refactor lpbuildd.binarypackage tests to be readable.
+  * Drop duplicated paths from the config file.
+
+  [ Colin Watson ]
+  * Apply more reasonable log handling, as well as logrotate.
+    RotatableFileLogObserver class borrowed from Launchpad, amended to use
+    SIGHUP as its reopening signal.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Wed, 20 May 2015 13:38:51 +0100
+
+launchpad-buildd (126) trusty; urgency=medium
+
+  [ Colin Watson ]
+  * Build with dpkg-deb -Zgzip, so that the resulting .deb can still be
+    installed on lucid.
+  * Drop support for running binarypackage builds without a "suite"
+    argument; Launchpad has passed this for all binarypackage builds since
+    2007.
+  * Expect a "distribution" argument in binarypackage and pass it to
+    sbuild's --archive option (LP: #1348077).  Remove the hardcoded
+    --archive=ubuntu from the configuration file.
+  * upgrade-config: Use python-apt for version comparison (LP: #574713).
+  * debian/launchpad-buildd.cron.daily: Remove old ddebs and translations
+    directories recursively (LP: #1417893).
+
+  [ William Grant ]
+  * Remove the 1GB RLIMIT_AS for recipe builds. All virtual builders now have
+    4GiB of RAM, so even maxing out a 32-bit address space can't cause much
+    thrashing. This also fixes build dependency installation for some packages,
+    as the ulimit was erroneously applied to more than just the tree build
+    phase (LP: #693524).
+  * Drop the long-obsolete "debian" alias for the "binarypackage" build
+    manager (LP: #538844).
+
+  [ Adam Conrad ]
+  * Avoid removing the buildlog in the cron.daily cleanup job, so that we
+    don't end up removing it and crashing lp-buildd on a long-hung build.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 06 Feb 2015 21:03:36 +0000
+
+launchpad-buildd (125) trusty; urgency=medium
+
+  [ Dimitri John Ledkov ]
+  * Enable verbose build logs (LP: #516208).
+
+  [ Colin Watson ]
+  * Calculate the FQDN dynamically in sbuildrc rather than substituting it
+    in from the postinst.  This is friendlier to scalingstack, where a
+    single image is used for multiple guests.
+  * Set V=1 rather than DH_VERBOSE=1, in line with Debian buildds and to
+    cover some more build systems (see https://bugs.debian.org/751528).
+  * Fix lpbuildd.livefs tests to account for the "suite" argument no longer
+    being accepted.
+  * Set LANG=C.UTF-8 when running "bzr builder", to ensure that it can
+    handle UTF-8 file and author names (LP: #1273487).
+
+  [ Adam Conrad ]
+  * scan-for-processes: Don't explode if one of the processes we were
+    going to kill completes before we get around to killing it.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Mon, 04 Aug 2014 08:51:37 +0200
+
+launchpad-buildd (124) hardy; urgency=medium
+
+  [ Adam Conrad ]
+  * Make launchpad-buildd more self-contained to avoid sbuild conflict:
+    - Move our forked sbuild to /usr/share/launchpad-buildd/slavebin
+    - Look for and use ~/.lp-sbuildrc instead of ~/.sbuildrc
+    - Move /etc/sbuild.conf to /usr/share/launchpad-buildd/sbuild.conf
+    - Move our internal helper binaries to /usr/share/launchpad-buildd
+    - Remove now unnecessary lintian overrides for launchpad-buildd
+  * Remove empty and obsolete /usr/share/launchpad-buildd/lpbuildd
+  * Update Standards-Version, and switch Conflicts to Breaks/Replaces.
+  * Remove instance configuration from /etc/launchpad-buildd/ on purge.
+
+  [ Colin Watson ]
+  * Install ltsp-server (but not its Recommends) for i386 livefs builds, as
+    Edubuntu needs it.
+  * Stop accepting the "suite" argument to livefs builds, now that Launchpad
+    has been updated to use the newer series/pocket protocol.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 04 Jul 2014 16:24:26 +0100
+
+launchpad-buildd (123) hardy; urgency=medium
+
+  * Fix handling of livefs builds for the -proposed pocket.
+  * Accept an "extra_ppas" entry in livefs arguments, which is passed to
+    livecd-rootfs to request that the image be built against additional
+    PPAs.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Sun, 22 Jun 2014 16:10:44 +0100
+
+launchpad-buildd (122) hardy; urgency=medium
+
+  * Drop the status_dict XML-RPC method, now that the master uses the
+    new-style dict-flavoured status method.
+  * Don't add symlinks to the results of livefs builds (LP: #1247461).
+  * Cope with builds that return multiple files with identical content.
+  * If a build is aborted between subprocesses, pretend that it was
+    terminated by a signal.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 13 May 2014 16:46:52 +0100
+
+launchpad-buildd (121) hardy; urgency=medium
+
+  * Retry "apt-get update" on failure after a short delay, as this
+    occasionally fails due to racing with an archive pulse.
+  * Go back to setting explicit values for LANG, LC_ALL, and LANGUAGE rather
+    than unsetting them, since otherwise sudo/pam_env may fill in unwanted
+    values from /etc/default/locale.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Tue, 28 Jan 2014 13:40:40 +0000
+
+launchpad-buildd (120) hardy; urgency=low
+
+  [ Colin Watson ]
+  * Unset LANG and LC_ALL rather than setting them to C, and unset a number
+    of other environment variables too (including DISPLAY and TERM), in line
+    with Debian buildds.
+  * Make the status XML-RPC method a synonym for status_dict.
+  * Add a new "livefs" build manager, based on livecd-rootfs/BuildLiveCD
+    (LP: #1247461).
+  * Remove virtualization check from buildrecipe.  It was a rather futile
+    security check as escaping chroots is trivial, and it will fail when the
+    PPA builder pool is converted to scalingstack.
+
+  [ Adam Conrad ]
+  * update-debian-chroot: Allow arm64-on-x86 builds with qemu-aarch64-static.
+  * slave-prep: output current dpkg-dev version for debugging purposes.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 23 Jan 2014 12:30:54 +0000
+
+launchpad-buildd (119) hardy; urgency=low
+
+  * Mount /dev/pts with -o gid=5,mode=620 to avoid needing pt_chown.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Thu, 10 Oct 2013 08:56:07 -0600
+
+launchpad-buildd (118) hardy; urgency=low
+
+  [ William Grant ]
+  * Fix fallback to PACKAGEFAIL of unknown sbuild DEPFAIL conditions
+    (LP: #1235038).
+
+  [ Colin Watson ]
+  * Fail the builder immediately if $HOME/.sbuildrc is corrupt
+    (LP: #1235287).
+  * Add a status_dict XML-RPC method for better extensibility, including
+    reporting the python-lpbuildd version (LP: #680514).
+
+ -- William Grant <william.grant@xxxxxxxxxxxxx>  Tue, 08 Oct 2013 15:09:47 +1100
+
+launchpad-buildd (117) hardy; urgency=low
+
+  * Fix dep-wait detection when recipes fail to install build-dependencies
+    (LP: #1234621).
+  * Remove *.pyc files from source tree on clean.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 03 Oct 2013 12:43:27 +0100
+
+launchpad-buildd (116) hardy; urgency=low
+
+  [ Colin Watson ]
+  * Remove obsolete BuilderStatus.ABORTED.
+  * Remove obsolete BuildDSlave.fetchFile method, unused since October 2005.
+  * If the expected .changes file doesn't exist, consider this as a package
+    build failure rather than crashing (LP: #993642).
+  * Don't attempt to read entire files into memory at once when storing them
+    in the file cache.
+  * Rearrange build log searching to avoid reading the entire build log into
+    memory at once (LP: #1227086).
+
+  [ Adam Conrad ]
+  * Tidy up log formatting of the "Already reaped..." message.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Fri, 27 Sep 2013 13:08:59 +0100
+
+launchpad-buildd (115) hardy; urgency=low
+
+  [ Adam Conrad ]
+  * Short-circuit the readlink call in scan-for-processes with a true to avoid
+    prematurely exiting the process scan when tripping over zombies.
+  * Write to temporary cache files and then rename after validation
+    to avoid the cache containing broken aborted files (LP: #471076)
+  * Skip nonexistent directories in cron cleanup code to avoid vomit
+    in cron log on fresh installs that lack those dirs (LP: #559115)
+  * Build buildd-slave-example.conf from template-buildd-slave.conf
+    using buildd-genconfig at package build time so it's not stale.
+  * Add a build-dependency on python to make buildd-genconfig work.
+  * Add kernel name (hey, we might build on another kernel some day)
+    and hostname (to help us track build host issues) to uname call.
+  * Add x32 and ppc64el to the list of 64-bit arches for linux64.
+  * Strip trailing whitespace in buildd-genconfig because I'm anal.
+  * Mangle recipe versions to match backports policy (LP: #1095103)
+  * Make scan-for-processes log output match the other slave helpers.
+
+  [ Colin Watson ]
+  * Move scan-for-processes up to the top-level slave code so that it is
+    available for more general use.
+  * Make abort work properly, calling scan-for-processes to kill all
+    processes in the chroot.
+
+ -- Colin Watson <cjwatson@xxxxxxxxxx>  Thu, 29 Aug 2013 11:32:23 +0100
+
+launchpad-buildd (114) hardy; urgency=low
+
+  * Don't use the uname-2.6 hack when building on quantal and newer.
+  * Display the linux32-faked kernel version before calling sbuild.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Tue, 24 Apr 2012 07:44:18 -0600
+
+launchpad-buildd (113) hardy; urgency=low
+
+  * Shut up umount-chroot's verbose output, it served its purpose.
+  * Yank out sbuild's dependency removal code, as we never once
+    checked the return from this anyway, so it's just wasted time.
+  * Stop writing to avg-space and avg-time, which we don't use.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Thu, 22 Mar 2012 04:01:48 -0600
+
+launchpad-buildd (112) hardy; urgency=low
+
+  [ Jelmer Vernooij ]
+  * Prevent slave from blowing up when it is aborted before a job has
+    started. LP: #497772
+
+  [ Adam Conrad ]
+  * Update sbuild-package and update-debian-chroot to use linux32/64
+    universally, and to pass --uname-2.6 when available, so we can
+    use 3.x.x kernels to build older releases on the buildds.
+  * Fix sbuild-package to report the correct number of cores/jobs.
+  * Make sure /usr/bin/check-implicit-pointer-functions is called for
+    all 64-bit builds on lucid and above, this logic got broken.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Wed, 25 Jan 2012 11:27:55 -0700
+
+launchpad-buildd (111) hardy; urgency=low
+
+  * Add preppath to buildd-slave-test.conf, to unbreak the LP test suite.
+
+ -- William Grant <wgrant@xxxxxxxxxx>  Fri, 09 Dec 2011 08:58:17 +1100
+
+launchpad-buildd (110) hardy; urgency=low
+
+  [ Jelmer Vernooij ]
+  * Use the actual target distroseries name in changelog, rather than
+    the same as the last entry. LP: #855479
+  * Use os.SEEK_END constant now that all build slaves run at least
+    hardy. LP: #239213
+
+  [ Adam Conrad ]
+  * Create a new slavebin script called 'slave-prep' to kick off builds:
+    - Move useless "echo" fork to slave-prep, and include our version
+    - Move ntpdate call from unpack-chroot to slave-prep
+    - Add preppath to the default config, and add a version 110 upgrade
+      stanza to our config file upgrading script to fix upgrades
+  * While doing the above, s/Synching/Syncing/ 'cause it drives me nuts
+  * Make slave-prep output versions of bzr, bzr-builder, python-lpbuildd
+
+  [ William Grant ]
+  * Log `uname -a` as well.
+
+ -- William Grant <wgrant@xxxxxxxxxx>  Mon, 05 Dec 2011 15:01:43 +1100
+
+launchpad-buildd (109) hardy; urgency=low
+
+  * Use sudo when installing qemu into the chroot.
+  * Only install qemu into the chroot when building arm* on x86, so armhf
+    builds on armel hosts don't try to do it.
+
+ -- William Grant <wgrant@xxxxxxxxxx>  Tue, 29 Nov 2011 22:02:00 +1100
+
+launchpad-buildd (108) hardy; urgency=low
+
+  [ Adam Conrad ]
+  * Use the chroot's dpkg-architecture instead of the base system.
+
+  [ Nick Moffitt ]
+  * Fixed up sbuild-package and update-debian-chroot to support
+    syscall-emulated ARM builds on non-ARM hardware.
+  * Added Recommends for qemu-user-static to launchpad-buildd, as the
+    package only exists in Universe in natty and later.
+
+ -- William Grant <wgrant@xxxxxxxxxx>  Tue, 29 Nov 2011 19:52:43 +1100
+
+launchpad-buildd (107) hardy; urgency=low
+
+  * Correction to generate-translation-templates for the new file location.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Mon, 21 Nov 2011 16:28:50 +1100
+
+launchpad-buildd (106) hardy; urgency=low
+
+  * Safer parsing in upgrade-config.
+  * Get 'make check' working for the split-out tree.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Mon, 21 Nov 2011 12:19:52 +1100
+
+launchpad-buildd (105.1) hardy; urgency=low
+
+  * Add strict version dependency of launchpad-buildd on python-lpbuildd.
+  * Add explicit Python dependency for lintian.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Mon, 21 Nov 2011 12:16:34 +1100
+
+launchpad-buildd (105) hardy; urgency=low
+
+  * Remove attempt to run dpkg-query from inside the slave: each state machine
+    step can run only a single iteration.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 18 Nov 2011 19:18:21 +1100
+
+launchpad-buildd (104) hardy; urgency=low
+
+  * Don't expect bzr-builddeb in the chroot; it's not there.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 18 Nov 2011 18:46:23 +1100
+
+launchpad-buildd (103) hardy; urgency=low
+
+  * Log dpkg versions from the slave, where they will be externally visible.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 18 Nov 2011 15:17:08 +1100
+
+launchpad-buildd (102) hardy; urgency=low
+
+  * Show dpkg versions of launchpad-buildd and some other relevant packages at
+    startup, for debuggability.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 18 Nov 2011 13:26:30 +1100
+
+launchpad-buildd (101) hardy; urgency=low
+
+  * Pass -Derror to bzr dailydeb. LP: 890892
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Wed, 16 Nov 2011 21:05:12 +1100
+
+launchpad-buildd (100) hardy; urgency=low
+
+  * Move python-lpbuildd to section python.
+  * Don't create /var/run/launchpad-buildd during installation, it's
+    already created at init-time.
+  * Remove unnecessary debian/launchpad-buildd.conffiles. debhelper
+    already adds the required conffiles.
+  * In buildrecipe, pass -sa to dpkg-buildpackage so the orig tarball(s)
+    always get included. LP: #891892
+
+ -- Jelmer Vernooij <jelmer@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 14:43:31 +0100
+
+launchpad-buildd (99) hardy; urgency=low
+
+  * launchpad-buildd conflicts with sbuild.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Wed, 16 Nov 2011 10:53:43 +1100
+
+launchpad-buildd (98) hardy; urgency=low
+
+  * Add launchpad-buildd dependency on python-apt, as an accommodation for it
+    being only a Recommends but actually required by python-debian.
+    LP: #890834
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Wed, 16 Nov 2011 10:28:48 +1100
+
+launchpad-buildd (97) hardy-cat; urgency=low
+
+  * drop bzr-builder dependency entirely and handle it in the autoinstall
+    process on x86 virtual builders
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Tue, 15 Nov 2011 03:15:23 -0700
+
+launchpad-buildd (96) hardy-cat; urgency=low
+
+  * only depend on bzr-builder on i386.
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Tue, 15 Nov 2011 02:46:54 -0700
+
+launchpad-buildd (95) hardy; urgency=low
+
+  * Add explicit dependency on pristine-tar, recommended by newer
+    versions of bzr-builder.
+  * Fix finding of upstream build directory after recipe builds.
+
+ -- Jelmer Vernooij <jelmer@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 13:18:51 +0100
+
+launchpad-buildd (94) hardy; urgency=low
+
+  * Auto-start on machines whose hostname fqdn ends in .buildd or .ppa.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 17:26:20 +1100
+
+launchpad-buildd (93) hardy; urgency=low
+
+  * Rename buildd-genconfig in the tree, rather than during install.
+  * Symlink check_implicit_function_pointers rather than copying.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 16:26:17 +1100
+
+launchpad-buildd (92) hardy; urgency=low
+
+  * Use debhelper for more of the package build.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 16:01:03 +1100
+
+launchpad-buildd (91) hardy; urgency=low
+
+  * launchpad-buildd will not start unless you set
+    RUN_NETWORK_REQUESTS_AS_ROOT=yes in /etc/default/launchpad-buildd.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 15:02:12 +1100
+
+launchpad-buildd (90) hardy; urgency=low
+
+  * debhelper is a Build-Depends because it is needed to run 'clean'.
+  * python-lpbuildd conflicts with launchpad-buildd << 88.
+  * Add and adjust build-arch, binary-arch, build-indep to match policy.
+  * Complies with standards version 3.9.2.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 14:30:36 +1100
+
+launchpad-buildd (89) hardy; urgency=low
+
+  * Add debian/copyright file.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 13:12:22 +1100
+
+launchpad-buildd (88) hardy; urgency=low
+
+  * Separate python-lpbuildd from the main launchpad-buildd package, so that
+    it can be used alone for integration tests.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Fri, 11 Nov 2011 12:43:20 +1100
+
+launchpad-buildd (87) hardy; urgency=low
+
+  * Split launchpad-buildd completely out of the Launchpad source tree.
+  * Rename the Python package to lpbuildd.
+
+ -- Martin Pool <mbp@xxxxxxxxxxxxx>  Wed, 09 Nov 2011 20:04:02 +1100
+
+launchpad-buildd (86) hardy-cat; urgency=low
+
+  * Cope with orig tarballs in the recipe result directory.
+
+ -- Jelmer Vernooij <jelmer@xxxxxxxxxxxxx>  Thu, 10 Nov 2011 19:16:44 +0100
+
+launchpad-buildd (85) hardy-cat; urgency=low
+
+  * buildrecipe: Fix env argument to call_report_rusage.
+
+ -- Jelmer Vernooij <jelmer@xxxxxxxxxxxxx>  Thu, 10 Nov 2011 17:34:57 +0100
+
+launchpad-buildd (84) hardy-cat; urgency=low
+
+  * Fix import of check_call in buildrecipe.
+  * Avoid using /usr/bin/env in buildrecipe, breaks use of -u argument to
+  Python.
+
+ -- Jelmer Vernooij <jelmer@xxxxxxxxxxxxx>  Thu, 10 Nov 2011 14:55:10 +0100
+
+launchpad-buildd (83) hardy-cat; urgency=low
+
+  [ Martin Pool ]
+   * Cut out readyservice from the buildds.  LP: #800295
+   * buildrecipe shows the bzr and bzr-builder versions.  LP: #884092
+   * buildrecipe shows bzr rusage.  LP: #884997
+
+  [ Steve Langasek ]
+  * Strip :any, :native qualifiers off all build-dependencies in sbuild, since
+    the distinction only matters once we want to do cross-building.
+
+  [ Jelmer Vernooij ]
+  * Pass --allow-fallback-to-native to "bzr dailydeb" for compatibility
+    with older recipe build behaviour. Depend on bzr-builder >= 0.7.1
+    which introduces this option. LP: #885497
+
+ -- Jelmer Vernooij <jelmer@xxxxxxxxxxxxx>  Wed, 09 Nov 2011 14:57:35 +0100
+
+launchpad-buildd (81) hardy-cat; urgency=low
+
+  * generate-translation-templates: switch to Python 2.7.
+
+ -- Danilo Šegan <danilo@xxxxxxxxxxxxx>  Mon, 17 Oct 2011 14:46:13 +0200
+
+launchpad-buildd (80) hardy-cat; urgency=low
+
+  * binfmt-support demonstrated umount ordering issues for us.  LP: #851934
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Mon, 19 Sep 2011 04:56:58 -0600
+
+launchpad-buildd (79) hardy-cat; urgency=low
+
+  * Fix sudoers.d/buildd permissions
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Fri, 19 Aug 2011 07:31:54 -0600
+
+launchpad-buildd (78) hardy-cat; urgency=low
+
+  * Correctly update sudoers files when needed.  LP: #742881
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Wed, 06 Apr 2011 22:20:17 -0600
+
+launchpad-buildd (77) hardy-cat; urgency=low
+
+  * Add back in ultimate-backstop umask() correction.
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Wed, 06 Apr 2011 13:34:05 -0600
+
+launchpad-buildd (76) hardy-cat; urgency=low
+
+  [ various ]
+  * ProjectGroup.products sort order and remove Author: comments.
+  * Fix some tests to not print stuff
+  * Make buildd pointer check regexes work on natty
+  * merge before rollout + text conflict patch by wgrant
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Tue, 15 Mar 2011 16:59:36 -0600
+
+launchpad-buildd (74) hardy-cat; urgency=low
+
+  [ Aaron Bentley]
+  * Memory-limit recipe builds. LP#676657
+
+  [ LaMont Jones]
+  * mount a tmpfs on /dev/shm in build chroots.  LP#671441
+
+  [Michael Bienia]
+  * Update regexes used for DEPWAIT.  LP#615286
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Tue, 23 Nov 2010 06:17:57 -0700
+
+launchpad-buildd (73) hardy-cat; urgency=low
+
+  * Revert to revision 70
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Thu, 28 Oct 2010 12:53:45 -0600
+
+launchpad-buildd (72) hardy-cat; urgency=low
+
+  * break out readyservice.py from tachandler.py. LP#663828
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Wed, 20 Oct 2010 13:03:23 -0600
+
+launchpad-buildd (71) hardy-cat; urgency=low
+
+  * Detect ppa hosts for build recipes.  LP#662664
+  * Better recipe builds. LP#599100, 627119, 479705
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Tue, 19 Oct 2010 13:48:33 -0600
+
+launchpad-buildd (70) hardy-cat; urgency=low
+
+  [ LaMont Jones ]
+  * Restore the rest of version 68.
+
+  [ James Westby ]
+  * buildrecipe: Specify BZR_EMAIL via sudo so that the called command
+    sees the environment variable.
+  * buildrecipe: call sudo -i -u instead of sudo -iu so that it works with
+    older versions of sudo.
+  * buildrecipe: flush stdout before calling another command so that
+    the build log has the output correctly interleaved.
+
+  [ William Grant ]
+  * correct arch_tag arguments.
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Fri, 20 Aug 2010 13:27:55 -0600
+
+launchpad-buildd (69) hardy-cat; urgency=low
+
+  * REVERT all of version 68 except for BZR_EMAIL LP#617072
+    (Not reflected in bzr.)
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Tue, 17 Aug 2010 10:40:03 -0600
+
+launchpad-buildd (68) hardy-cat; urgency=low
+
+  [ William Grant ]
+  * Take an 'arch_tag' argument, so the master can override the slave
+    architecture.
+
+  [ Jelmer Vernooij ]
+
+  * Explicitly use source format 1.0.
+  * Add LSB information to init script.
+  * Use debhelper >= 5 (available in dapper, not yet deprecated in
+    maverick).
+  * Fix spelling in description.
+  * Install example buildd configuration.
+
+  [ Paul Hummer ]
+  * Provide BZR_EMAIL for bzr 2.2 in the buildds LP#617072
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Mon, 16 Aug 2010 13:25:09 -0600
+
+launchpad-buildd (67) hardy-cat; urgency=low
+
+  * Force aptitude installation for recipe builds on maverick
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Fri, 23 Jul 2010 14:22:23 -0600
+
+launchpad-buildd (66) hardy-cat; urgency=low
+
+  * handle [linux-any] build-dependencies.  LP#604981
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Mon, 19 Jul 2010 12:13:31 -0600
+
+launchpad-buildd (65) hardy-cat; urgency=low
+
+  * Drop preinst check, since human time does not scale across a large
+    rollout.  soyuz just needs to deal with upgrades mid-build better.
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Thu, 08 Jul 2010 05:04:02 -0600
+
+launchpad-buildd (64) hardy-cat; urgency=low
+
+  * Pottery now strips quotes from variables.
+
+ -- Jeroen Vermeulen <jtv@xxxxxxxxxxxxx>  Wed, 30 Jun 2010 12:50:59 +0200
+
+launchpad-buildd (63) hardy-cat; urgency=low
+
+  * Drop apply-ogre-model, since override-sources-list replaced it three years
+    ago. Also clean up extra_args parsing a bit.
+
+ -- William Grant <wgrant@xxxxxxxxxx>  Sat, 12 Jun 2010 11:33:11 +1000
+
+launchpad-buildd (62) hardy-cat; urgency=low
+
+  * Make the buildds cope with not having a sourcepackagename LP#587109
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Tue, 08 Jun 2010 13:02:31 -0600
+
+launchpad-buildd (61) hardy-cat; urgency=high
+
+  [ William Grant ]
+  * Fixed translation templates slave to return files properly. LP#549422
+
+  [ Danilo Segan ]
+  * Added more output to generate-translation-templates. LP#580345
+
+  [ Henning Eggers ]
+  * Improved output of build xmlrpc call, not returning None now. LP#581746
+  * Added apache2 dependency. LP#557634
+  * Added preinst script to prevent installation when a build is running.
+    LP#557347
+
+  [ LaMont Jones ]
+  * preinst needs to detect a stale buildlog as well.
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Fri, 21 May 2010 05:52:53 -0600
+
+launchpad-buildd (60) lucid-cat; urgency=low
+
+  * Depends: lsb-release, which is ubuntu-minimal, but not essential.
+
+ -- LaMont Jones <lamont@xxxxxxxxxx>  Thu, 01 Apr 2010 08:54:48 -0600
+
+launchpad-buildd (59) lucid-cat; urgency=low
+
+  [ Henning Eggers ]
+  * Added translation template generation code (pottery).
+
+  [ LaMont Jones ]
+  * set umask for twisted where supported
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Wed, 31 Mar 2010 10:38:15 -0600
+
+launchpad-buildd (58~1) karmic; urgency=low
+
+  * Misc fixes to match APIs.
+
+ -- Aaron Bentley <aaron@xxxxxxxxxxxxxxxx>  Fri, 15 Jan 2010 10:03:07 +1300
+
+launchpad-buildd (58~0) karmic; urgency=low
+
+  * Include buildrecipe.py.
+
+ -- Aaron Bentley <aaron@xxxxxxxxxxxxxxxx>  Wed, 13 Jan 2010 17:06:59 +1300
+
+launchpad-buildd (57) hardy-cat; urgency=low
+
+  * Split the sbuild wrapper from DebianBuildManager into a new
+    BinaryPackageBuildManager, and point the 'debian' builder at that
+    instead.
+
+ -- William Grant <wgrant@xxxxxxxxxx>  Tue, 12 Jan 2010 09:22:50 +1300
+
+launchpad-buildd (56) hardy-cat; urgency=low
+
+  * only error out on implicit-function-pointers check on lucid or later,
+    non-32-bit architectures.  Warnings elsewhere.  LP#504078
+  * drop use of ccache and /var/cache/apt/archives, since we don't use one,
+    and the other is just plain silly.
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Mon, 11 Jan 2010 13:12:49 -0700
+
+launchpad-buildd (54) hardy-cat; urgency=low
+
+  [ William Grant ]
+  * debian.py: Tell sbuild to build debug symbols if the
+    build_debug_symbols argument is True.
+  * sbuild: Set "Build-Debug-Symbols: yes" in CurrentlyBuilding if
+    we have been told to build debug symbols.
+
+  [ LaMont Jones ]
+  * do not ignore SIGHUP in builds - it breaks test suites. LP#453460
+  * create filecache-default/ccache directories in init.d as well as postinst
+  * sbuild: run dpkg-source inside the chroot.  LP#476036
+  * sbuild: change the regexp for dpkg-source extraction to handle both karmic and pre-karmic dpkg.  LP#476036
+  * use --print-architecture instead of --print-installation-architecture
+  * mount-chroot: copy hosts et al into chroot. LP#447919
+  * provide and call check-implicit-function-pointers.
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Mon, 14 Dec 2009 12:00:10 -0700
+
+launchpad-buildd (52) dapper-cat; urgency=low
+
+  * Depends: apt-transport-https
+
+ -- LaMont Jones <lamont@xxxxxxxxxxxxx>  Fri, 09 Oct 2009 11:00:50 -0600
+
+launchpad-buildd (50) dapper-cat; urgency=low
+
+  * sbuild: Change all invocations of apt and dpkg to occur inside
+    the build chroot, rather than happening outside the chroot with
+    a bunch of flags to operate on data files in the chroot.  This
+    should clear up issues we see with mismatched host toolchains.
+  * sbuild: Revert the above in the case of "apt-get source" which
+    doesn't require any fancy features in the chroot and, frankly,
+    is much easier to manage if it's executed externally.
+  * scan-for-processes: Bring in a change from production to make
+    sure that we follow symlinks in our search for process roots.
+  * sbuild-package: Output NR_PROCESSORS in the build logs, for
+    slightly easier debugging of possible parallel build bugs.
+  * update-debian-chroot: Stop using chapt-get, and instead chroot
+    into the build chroot and call the native apt-get there.
+  * update-debian-chroot: Cargo-cult the linux32 magic from the
+    sbuild wrapper to set our personality on chroot upgrades.
+  * mount-chroot: Mount sys in the chroot too.  While it shouldn't
+    be, strictly-speaking, required for anything, it's nice to have.
+  * chapt-get, slave_chroot_tool.py: Delete both as obsolete cruft.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Fri, 24 Jul 2009 07:21:30 -0600
+
+launchpad-buildd (49) dapper-cat; urgency=low
+
+  * sbuild.conf: bump default automake from automake1.8 to automake1.9
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Fri, 12 Sep 2008 08:54:24 -0600
+
+launchpad-buildd (48) dapper-cat; urgency=low
+
+  * sbuild-package: If we're an amd64 host system, but being used
+    to build i386 or lpia, use linux32 to pretend to be i686.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Fri, 12 Sep 2008 08:12:34 -0600
+
+launchpad-buildd (47) dapper-cat; urgency=low
+
+  * slave.py: If the logfile doesn't currently exist on disk when
+    getLogTail() goes looking for it (which is a possible race with
+    the new sanitisation code), just return an empty string.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Mon, 02 Jun 2008 13:09:55 -0600
+
+launchpad-buildd (46) dapper-cat; urgency=low
+
+  * slave.py: Accept a separate username and password to the
+    ensurePresent() call which, if present, are used to install
+    an auth handler to cope with basic http auth with the http
+    server when fetching files.
+  * slave.py: Ensure that build logs are sanitized so that any
+    user:password@ parts in URLs are removed.
+
+ -- Julian Edwards <julian.edwards@xxxxxxxxxxxxx>  Tue, 29 Apr 2008 14:25:00 +0100
+
+launchpad-buildd (45) dapper-cat; urgency=low
+
+  * slave.py: Stop setting BuilderStatus.WAITING in each failure
+    method, as this gives us a race where the builddmaster might
+    dispatch another build to us before we're done cleaning up.
+  * slave.py: Don't set BuildStatus.OK in buildComplete(), this is
+    now a generic "the build has ended, successfully or not" method.
+  * slave.py: Define a new buildOK() method that sets BuildStatus.OK.
+  * debian.py: When done cleaning, if the build isn't already marked
+    as failed, call buildOK, then call buildComplete unconditionally.
+  * The above changes should resolve https://launchpad.net/bugs/179466
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Tue, 08 Apr 2008 14:12:07 -0600
+
+launchpad-buildd (44) dapper-cat; urgency=low
+
+  * slave.py: Redefine "private" _unpackChroot() as "public" doUnpack(),
+    so we can use it from the build iteration control process.
+  * slave.py: Make the initiate method set a _chroottarfile private
+    variable for use by doUnpack(), rather than calling _unpackChroot().
+  * slave.py: Trigger the forked buildd process with an echo statement.
+  * debian.py: Add the INIT state to the DebianBuildState class.
+  * debian.py: Start the build process at INIT state instead of UNPACK.
+  * debian.py: Add iterate_INIT(), which just checks success of the
+    initial variable sanitisation checks, then hands off to doUnpack().
+  * debian.py: Adjust the failure return calls of the UNPACK and MOUNT
+    methods to chrootFail() instead of builderFail(), for correctness.
+  * The above changes should resolve https://launchpad.net/bugs/211974
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Mon, 07 Apr 2008 13:53:20 -0600
+
+launchpad-buildd (43) dapper-cat; urgency=low
+
+  * unpack-chroot: Move the ntpdate calls below the bunzip/exec bit,
+    so we don't run ntpdate twice when unzipping tarballs, which
+    happens on every single build on Xen hosts (like the PPA hosts).
+  * debian/control: We use adduser in postinst, depending on it helps.
+  * debian/control: Set myself as the Maintainer, since I'm in here.
+  * debian/control: Change our section from "misc" to "admin".
+  * sbuild{,-package}: Pass DEB_BUILD_OPTIONS="parallel=N" to dpkg.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Thu, 24 Jan 2008 15:39:20 -0700
+
+launchpad-buildd (42) dapper-cat; urgency=low
+
+  * sbuild: using "eq" to evaluate strings instead of "==" is ever
+    so slightly less retarded (fixed the launchpad bug #184565)
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Tue, 22 Jan 2008 16:21:54 -0700
+
+launchpad-buildd (41) dapper-cat; urgency=low
+
+  * sbuild: If we've already marked a package as "installed" with a
+    valid version, don't overwrite that version with PROVIDED.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Thu, 17 Jan 2008 10:39:26 -0700
+
+launchpad-buildd (40) dapper-cat; urgency=low
+
+  * sbuild: Don't allow versioned build-deps to be satisfied by provided
+    packages, but force them to go through the "upgrade/downgrade" tests.
+  * sbuild: Do --info and --contents on _all.deb packages as well, if
+    we're building arch:all packages.
+  * sbuild: Don't process ENV_OVERRIDE anymore, we only had an override
+    for one thing anyway (LC_ALL), and this code caused bug #87077.
+  * sbuild-package: Call sbuild with LC_ALL=C explicitly, to compensate.
+  * Makefile: clean up the makefile a bit to DTRT (as I expect it).
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Tue, 15 Jan 2008 16:51:08 -0700
+
+launchpad-buildd (39) unstable; urgency=low
+
+  * If we're fed an archive_purpose argument from the builddmaster,
+    we pass --purpose=$archive_purpose to sbuild, and if we get suite
+    from the builddmaster, we pass --dist=$suite to sbuild.
+  * Mangle sbuild to write out Suite: and Purpose: stanzas to our
+    CurrentlyBuilding file, according to command-line input.
+  * Now that we're no longer always feeding -dautobuild to sbuild,
+    fix up sbuild to always look for the chroot at chroot-autobuild
+    instead of the Debian Way of using chroot-$suite.
+  * If the config file contains an ntphost stanza, use that with
+    ntpdate to sync the system's clock before we unpack the chroot.
+  * Mangle update-config to add an ntphost stanza to the default
+    config, and to 's/-dautobuild //' from the sbuild arguments.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Thu, 20 Dec 2007 01:51:49 -0700
+
+launchpad-buildd (38) unstable; urgency=high
+
+  * unpack-chroot: set $PATH rather than hardcoding paths to binaries
+    since bzip2 moved from /usr/bin to /bin in edgy and didn't bother with
+    compatibility symlinks.
+
+ -- James Troup <james.troup@xxxxxxxxxxxxx>  Wed, 21 Nov 2007 17:08:36 +0000
+
+launchpad-buildd (37) dapper; urgency=high
+
+  * update-debian-chroot: Adam's LPIA support (i.e. overriding
+    architecture for chapt-get).
+  * debian/launchpad-buildd.cron.daily: fix run-on-line.
+  * debian/postinst: only create ~buildd/.sbuildrc if it doesn't exist.
+    This avoids the problem of upgrades of the launchpad-buildd package
+    resetting the architecture to i386 on lpia builders.
+
+ -- James Troup <james.troup@xxxxxxxxxxxxx>  Wed, 14 Nov 2007 18:34:46 +0000
+
+launchpad-buildd (36) dapper; urgency=low
+
+  * changing override-sources to replace current sources.list with
+    the content sent by buildmaster instead of prepend. It will allow
+    us to cope more easily with SoyuzArchive implementation (PARTNER,
+    EMBARGOED, PPA)
+
+ -- Celso Providelo <cprov@xxxxxxxxxxxxx>  Thu, 7 Aug 2007 14:10:26 -0300
+
+launchpad-buildd (35) unstable; urgency=low
+
+  * including previous code changes (32 & 33).
+
+ -- Celso Providelo <cprov@xxxxxxxxxxxxx>  Thu, 23 May 2007 17:40:26 -0300
+
+launchpad-buildd (34) unstable; urgency=low
+
+  * add support for overriding the chroot /etc/apt/sources.list with the
+    content of builddmaster build arguments 'archives'.
+
+ -- Celso Providelo <cprov@xxxxxxxxxxxxx>  Thu, 17 May 2007 15:12:26 -0300
+
+launchpad-buildd (33) unstable; urgency=low
+
+  * Mangle sbuild further to allow us to publish Martin's debug debs (ddeb)
+    to public_html/ddebs/ until such a time as soyuz can do this natively.
+  * Fix the auto-dep-wait regexes to allow for versions with ~ in them.
+  * Make cron.daily clean out translations and ddebs more than 1 week old.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Sat, 30 Sep 2006 17:25:25 +1000
+
+launchpad-buildd (32) unstable; urgency=low
+
+  * We need to create /var/run/launchpad-buildd in our init script in the
+    case (such as in current dapper) where /var/run is on a tmpfs.
+  * Our init script shouldn't exit non-zero on "stop" if already stopped.
+  * Remove exc_info argument from our call to self.log in slave.py, which
+    clearly doesn't support that argument, so stop producing tracebacks.
+  * Reset self.builddependencies in our clean routine, so the variable
+    doesn't get leaked to the next build, causing me SERIOUS confusion.
+  * Tidy up translation handling a bit more to deal with old chroots (where
+    pkgstriptranslations won't dpkg-distaddfile for us), and to chmod the
+    translation dirs after the build, so apache can actually get at them.
+  * Add --no_save to our command line to avoid useless -shutdown.tap files.
+  * Make sure umount-chroot doesn't fail, even if there's nothing to umount.
+  * Append to the cron.daily cleaning to also occasionally clean up the apt
+    cache and /home/buildd/filecache-default, so we don't run out of disk.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Fri, 17 Mar 2006 19:39:05 +1100
+
+launchpad-buildd (31) unstable; urgency=low
+
+  * Cherry-pick patch from Ryan's sbuild that outputs dpkg --purge output
+    line-by-line, instead of as one big blob, to make output on the web
+    UI a little bit more friendly for people following along at home.
+  * Install a cron.daily script (eww) to purge old build logs for now until
+    I have the time to learn how twisted's native log rotation works.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Wed, 15 Mar 2006 17:23:26 +1100
+
+launchpad-buildd (30) unstable; urgency=low
+
+  * Move our translation publishing mojo so it happens BEFORE we move
+    all the files from debian/files out of the chroot, instead of after.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Wed,  8 Mar 2006 18:50:49 +1100
+
+launchpad-buildd (29) unstable; urgency=low
+
+  * Use dpkg --print-installation-architecture in our postinst instead
+    of --print-architecture to avoid spewing spurious error messages.
+  * Remove the check for log_dir, since we call sbuild with --nolog,
+    and stop creating $HOME/logs in the user setup part of postinst.
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Tue,  7 Mar 2006 19:13:56 +1100
+
+launchpad-buildd (28) unstable; urgency=low
+
+  * Modify the protocol method ensurepresent to return additional
+    information about the target files lookup procedure. It helps to
+    debug intermittent Librarian errors.
+
+ -- Celso Providelo <celso.providelo@xxxxxxxxxxxxx>  Mon, 06 Mar 2006 16:42:00 -0300
+
+launchpad-buildd (27) unstable; urgency=low
+
+  * Update the slave chroot tool to use getent so it works on the production
+    buildds
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Mon, 20 Feb 2006 12:57:45 +0000
+
+launchpad-buildd (26) unstable; urgency=low
+
+  * Update buildd-slave code to allow for GIVENBACK status returns,
+    matching the states under which sbuild used to do --auto-give-back.
+  * Port over sanae's build log regex parsing to allow us to do:
+    - Automatic dep-wait handling, based on sbuild's logs of apt-get.
+    - Automatic give-backs for a few corner cases (like kernel bugs).
+  * Make sbuild stop dying if we have no sendmail installed, since we
+    don't really want it sending mail in the launchpad world anyway.
+  * Call sbuild and apt with "LANG=C", so we don't have to worry about
+    locales matching between the base system and the autobuild chroots.
+  * Clear up confusion in build states with 's/BUILDFAIL/PACKAGEFAIL/'
+
+ -- Adam Conrad <adconrad@xxxxxxxxxx>  Mon, 27 Feb 2006 14:00:08 +1100
+
+launchpad-buildd (25) unstable; urgency=low
+
+  * Update sbuild.conf to current yumminess.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri,  3 Feb 2006 19:22:01 +0000
+
+launchpad-buildd (24) unstable; urgency=low
+
+  * Add /var/cache/apt/archives to the buildd chroots when mounting
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri,  3 Feb 2006 00:30:07 +0000
+
+launchpad-buildd (23) unstable; urgency=low
+
+  * And make apply-ogre-model use $SUDO, yay
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri, 27 Jan 2006 13:59:10 +0000
+
+launchpad-buildd (22) unstable; urgency=low
+
+  * Fix typo in apply-ogre-model (missing space)
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri, 27 Jan 2006 13:55:12 +0000
+
+launchpad-buildd (21) unstable; urgency=low
+
+  * Fix the .extend call for the --comp argument to pass it as one argument
+    instead of as - - c o m p = m a i n (which kinda doesn't work)
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri, 27 Jan 2006 13:45:34 +0000
+
+launchpad-buildd (20) unstable; urgency=low
+
+  * Update sbuild to the latest sbuild from adam.
+  * Make sure we pass --archive=ubuntu
+  * Make sure we pass --comp=<the component we're building for>
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Thu, 26 Jan 2006 17:20:49 +0000
+
+launchpad-buildd (19) unstable; urgency=low
+
+  * Add ogre support to the slave chroot tool
+  * Make sure the chroot tool ensures localhost in /etc/hosts in the chroot
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Wed, 25 Jan 2006 12:29:04 +0000
+
+launchpad-buildd (18) unstable; urgency=low
+
+  * Remove sbuildrc.tmp dangleberry in postinst
+  * Add linux32 to set of depends so that hppa, sparc and powerpc can build
+  * Make hppa, sparc, powerpc use linux32 to invoke the sbuild binary
+  * Add --resolve-deps to debootstrap invocation
+  * Make chroot tool use /bin/su - rather than /bin/sh for chrooting. shiny
+    (apparently)
+  * Add a bunch of deps infinity spotted.
+  * Make sure we chown the chroot tarball to the calling user after packing
+    it up.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Wed,  9 Nov 2005 17:37:37 -0500
+
+launchpad-buildd (17) unstable; urgency=low
+
+  * Changed default UID/GID to match the ldap buildd UID/GID
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Wed,  9 Nov 2005 17:13:22 -0500
+
+launchpad-buildd (16) unstable; urgency=low
+
+  * Change the XMLRPC method 'ensure' to be 'ensurepresent'
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Wed,  5 Oct 2005 15:50:58 +0100
+
+launchpad-buildd (15) unstable; urgency=low
+
+  * Fix it so getting a logtail when less than 2k is available will work.
+  * Actually install apply-ogre-model
+  * Also spot arch_indep properly
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Mon,  3 Oct 2005 14:34:55 +0100
+
+launchpad-buildd (14) unstable; urgency=low
+
+  * Slight bug in slave.py meant missing .emptyLog() attribute. Fixed.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Mon,  3 Oct 2005 14:21:16 +0100
+
+launchpad-buildd (13) unstable; urgency=low
+
+  * Fix a syntax error in the postinst
+  * Oh, and actually include the buildd config upgrader
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Mon,  3 Oct 2005 12:17:50 +0100
+
+launchpad-buildd (12) unstable; urgency=low
+
+  * Implement V1.0new protocol.
+  * Add in OGRE support
+  * Add in archindep support
+  * If upgrading from < v12, will remove -A from sbuildargs and add in
+    a default ogrepath to any buildd configs found in /etc/launchpad-buildd
+  * Prevent launchpad-buildd init from starting ~ files
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Sun,  2 Oct 2005 23:20:08 +0100
+
+launchpad-buildd (11) unstable; urgency=low
+
+  * Quieten down the slave scripts and make them prettier for the logs.
+  * make unpack-chroot uncompress the chroot and keep it uncompressed if
+    possible. This fixes bug#2699
+  * Make the slave run the process reaper run even if the build failed.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri, 30 Sep 2005 00:24:45 +0100
+
+launchpad-buildd (10) unstable; urgency=low
+
+  * Make sure /etc/source-dependencies is present in the postinst.
+    (just need to be touched)
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Wed, 28 Sep 2005 22:02:26 +0100
+
+launchpad-buildd (9) unstable; urgency=low
+
+  * Implement /filecache/XXX urls in the slave to permit larger file transfer
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Tue, 27 Sep 2005 13:16:52 +0100
+
+launchpad-buildd (8) unstable; urgency=low
+
+  * spiv's crappy spawnFDs implementation needs an int not a file handle
+    and can't cope with converting one to the other :-(
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Tue, 27 Sep 2005 02:18:05 +0100
+
+launchpad-buildd (7) unstable; urgency=low
+
+  * Made sure the slave puts /dev/null on the subprocess stdin.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Tue, 27 Sep 2005 01:52:50 +0100
+
+launchpad-buildd (6) unstable; urgency=low
+
+  * Removed slavechroot.py from installed set.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Thu, 15 Sep 2005 11:39:25 +0100
+
+launchpad-buildd (5) unstable; urgency=low
+
+  * Add slave tool and example chroot configuration
+  * Added debootstrap and dpkg-dev to the dependencies
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri,  9 Sep 2005 16:38:22 +0100
+
+launchpad-buildd (4) unstable; urgency=low
+
+  * Add sbuild.conf which was previously missing
+  * Fix up abort protocol and various other bits in the slave
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Fri,  9 Sep 2005 14:24:31 +0100
+
+launchpad-buildd (3) unstable; urgency=low
+
+  * Modified postinst to make sure ccache and log dirs are created
+    even if the user already exists.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Wed,  7 Sep 2005 15:50:36 +0100
+
+launchpad-buildd (2) unstable; urgency=low
+
+  * Fixes to postinst to make sure ccache and log dirs are created if missing.
+  * Added README to explain how to build the package.
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Thu,  1 Sep 2005 10:46:08 +0100
+
+launchpad-buildd (1) unstable; urgency=low
+
+  * Initial version
+
+ -- Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>  Mon, 13 Jun 2005 11:08:38 +0100
+
diff --git a/debian/clean b/debian/clean
new file mode 100644
index 0000000..5792b40
--- /dev/null
+++ b/debian/clean
@@ -0,0 +1 @@
+buildd-example.conf
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..e4b5dc3
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,82 @@
+Source: launchpad-buildd
+Section: admin
+Priority: extra
+Maintainer: Launchpad Developers <launchpad-dev@xxxxxxxxxxxxxxxxxxx>
+Uploaders: Colin Watson <cjwatson@xxxxxxxxxx>,
+           Tom Wardill <tom.wardill@xxxxxxxxxxxxx>,
+Standards-Version: 3.9.5
+Build-Depends: apt-utils,
+               bzr,
+               curl,
+               debhelper (>= 9.20160709~),
+               dh-exec,
+               dh-python,
+               git,
+               gpg,
+               intltool,
+               python3 (>= 3.6),
+               python3-apt,
+               python3-debian,
+               python3-fixtures,
+               python3-netaddr,
+               python3-pylxd,
+               python3-requests,
+               python3-responses,
+               python3-setuptools,
+               python3-systemfixtures,
+               python3-testtools,
+               python3-twisted (>= 16.4.0),
+               python3-txfixtures,
+               python3-yaml,
+# We don't use the bits of pylxd that require this at run-time, but at
+# build-time pybuild fails if it's not available.
+               python3-ws4py,
+               python3-zope.interface,
+Vcs-Git: https://git.launchpad.net/launchpad-buildd
+Vcs-Browser: https://git.launchpad.net/launchpad-buildd
+
+Package: launchpad-buildd
+Section: misc
+Architecture: all
+Depends: adduser,
+         bzip2,
+         debootstrap,
+         dmsetup,
+         dnsmasq-base,
+         dpkg-dev,
+         file,
+         gpg,
+         lsb-release,
+         lsof,
+         ntpdate,
+         pristine-tar,
+         python3,
+         python3-apt,
+         python3-lpbuildd (= ${source:Version}),
+         sbuild,
+         schroot,
+         sudo,
+         ${misc:Depends},
+Breaks: python-lpbuildd (<< 190~)
+Replaces: python-lpbuildd (<< 190~)
+Description: Launchpad buildd slave
+ This is the launchpad buildd slave package. It contains everything needed to
+ get a launchpad buildd going apart from the database manipulation required to
+ tell launchpad about the slave instance. If you are creating more than one
+ slave instance on the same computer, be sure to give them independent configs
+ and independent filecaches etc.
+
+Package: python3-lpbuildd
+Section: python
+Architecture: all
+Depends: apt-utils,
+         procps,
+         python3-apt,
+         python3-netaddr,
+         python3-pylxd,
+# Work around missing dependency in python3-pbr.
+         python3-setuptools,
+         ${misc:Depends},
+         ${python3:Depends},
+Description: Python 3 libraries for a Launchpad buildd slave
+ This contains the Python 3 libraries that control the Launchpad buildd slave.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..113e3a4
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,23 @@
+This package is maintained by the Canonical Launchpad team <launchpad-dev@xxxxxxxxxxxxxxxxxxx>.
+
+The upstream source was downloaded from <https://launchpad.net/launchpad-buildd>.
+
+Copyright and licence terms:
+
+	Launchpad is Copyright 2004-2011 Canonical Ltd.
+
+	Canonical Ltd ("Canonical") distributes the Launchpad source code
+	under the GNU Affero General Public License, version 3 ("AGPLv3").
+
+	The image and icon files in Launchpad are copyright Canonical, and
+	unlike the source code they are not licensed under the AGPLv3.
+	Canonical grants you the right to use them for testing and development
+	purposes only, but not to use them in production (commercially or
+	non-commercially).
+
+	The Launchpad name and logo are trademarks of Canonical, and may not
+	be used without the prior written permission of Canonical.
+
+	Third-party copyright in this distribution is noted where applicable.
+
+	All rights not expressly granted are reserved.
diff --git a/debian/launchpad-buildd-generator b/debian/launchpad-buildd-generator
new file mode 100755
index 0000000..797f453
--- /dev/null
+++ b/debian/launchpad-buildd-generator
@@ -0,0 +1,19 @@
+#! /bin/sh
+set -e
+
+# Generate systemd unit dependency symlinks for all configured
+# launchpad-buildd instances.
+
+wantdir="$1/launchpad-buildd.service.wants"
+template=/lib/systemd/system/launchpad-buildd@.service
+
+mkdir -p "$wantdir"
+
+for conf in /etc/launchpad-buildd/*; do
+    # Skip nonexistent files (perhaps due to the glob matching no files).
+    [ -e "$conf" ] || continue
+    # Skip backup files.
+    case $conf in -*|*~) continue ;; esac
+
+    ln -s "$template" "$wantdir/launchpad-buildd@${conf##*/}.service"
+done
diff --git a/debian/launchpad-buildd.cron.daily b/debian/launchpad-buildd.cron.daily
new file mode 100644
index 0000000..1c6d660
--- /dev/null
+++ b/debian/launchpad-buildd.cron.daily
@@ -0,0 +1,13 @@
+#!/bin/sh
+#
+# Copyright 2009-2013 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+CLEANDIRS=""
+CLEANDIRS="$CLEANDIRS /home/buildd/filecache-default/"
+
+for cleandir in $CLEANDIRS; do
+  [ ! -d "$cleandir" ] || find "$cleandir" -mindepth 1 -mtime +2 \
+			  -not -name buildlog -print0 | \
+			  xargs -r -0 rm -r
+done
diff --git a/debian/launchpad-buildd.dirs b/debian/launchpad-buildd.dirs
new file mode 100644
index 0000000..8ef1bd0
--- /dev/null
+++ b/debian/launchpad-buildd.dirs
@@ -0,0 +1,2 @@
+etc/launchpad-buildd
+var/log/launchpad-buildd
diff --git a/debian/launchpad-buildd.examples b/debian/launchpad-buildd.examples
new file mode 100644
index 0000000..5792b40
--- /dev/null
+++ b/debian/launchpad-buildd.examples
@@ -0,0 +1 @@
+buildd-example.conf
diff --git a/debian/launchpad-buildd.install b/debian/launchpad-buildd.install
new file mode 100644
index 0000000..301561a
--- /dev/null
+++ b/debian/launchpad-buildd.install
@@ -0,0 +1,12 @@
+bin/builder-prep			usr/share/launchpad-buildd/bin
+bin/buildrecipe				usr/share/launchpad-buildd/bin
+bin/check-implicit-pointer-functions	usr/share/launchpad-buildd/bin
+bin/in-target				usr/share/launchpad-buildd/bin
+bin/lpbuildd-git-proxy			usr/share/launchpad-buildd/bin
+bin/sbuild-package			usr/share/launchpad-buildd/bin
+buildd-genconfig			usr/share/launchpad-buildd
+debian/launchpad-buildd-generator	lib/systemd/system-generators
+debian/upgrade-config			usr/share/launchpad-buildd
+default/launchpad-buildd 		etc/default
+sbuildrc				usr/share/launchpad-buildd
+template-buildd.conf			usr/share/launchpad-buildd
diff --git a/debian/launchpad-buildd.links b/debian/launchpad-buildd.links
new file mode 100755
index 0000000..9da9c20
--- /dev/null
+++ b/debian/launchpad-buildd.links
@@ -0,0 +1,2 @@
+#! /usr/bin/dh-exec
+${LIBDIR}/lpbuildd/buildd.tac	usr/lib/launchpad-buildd/buildd.tac
diff --git a/debian/launchpad-buildd.logrotate b/debian/launchpad-buildd.logrotate
new file mode 100644
index 0000000..67c12a3
--- /dev/null
+++ b/debian/launchpad-buildd.logrotate
@@ -0,0 +1,13 @@
+/var/log/launchpad-buildd/*.log {
+    rotate 14
+    daily
+    dateext
+    delaycompress
+    compress
+    notifempty
+    missingok
+    create 0644 buildd buildd
+    postrotate
+        service launchpad-buildd reload
+    endscript
+}
diff --git a/debian/launchpad-buildd.maintscript b/debian/launchpad-buildd.maintscript
new file mode 100644
index 0000000..cfeedce
--- /dev/null
+++ b/debian/launchpad-buildd.maintscript
@@ -0,0 +1 @@
+rm_conffile /etc/init.d/launchpad-buildd 210~
diff --git a/debian/launchpad-buildd.service b/debian/launchpad-buildd.service
new file mode 100644
index 0000000..ca425b3
--- /dev/null
+++ b/debian/launchpad-buildd.service
@@ -0,0 +1,15 @@
+# This service is really a systemd target, but we use a service since
+# targets cannot be reloaded.  See launchpad-buildd@.service for instance
+# configuration.
+
+[Unit]
+Description=Launchpad build daemon
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/bin/true
+ExecReload=/bin/true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/debian/launchpad-buildd@.service b/debian/launchpad-buildd@.service
new file mode 100644
index 0000000..23cc469
--- /dev/null
+++ b/debian/launchpad-buildd@.service
@@ -0,0 +1,30 @@
+[Unit]
+Description=Launchpad build daemon (%i)
+PartOf=launchpad-buildd.service
+Before=launchpad-buildd.service
+ReloadPropagatedFrom=launchpad-buildd.service
+After=network.target time-sync.target cloud-init.service
+# Useful for certain kinds of image builds.
+After=modprobe@nbd.service
+Requires=modprobe@nbd.service
+
+[Service]
+Type=simple
+RuntimeDirectory=launchpad-buildd
+LogsDirectory=launchpad-buildd
+User=buildd
+SupplementaryGroups=lxd
+EnvironmentFile=-/etc/default/launchpad-buildd
+Environment=BUILDD_CONFIG=/etc/launchpad-buildd/%i
+# When enabled, launchpad-buildd accepts network commands and runs them as
+# root.  If you are sure this server will only be reachable by trusted
+# machines, edit /etc/default/launchpad-buildd.
+ExecStartPre=/usr/bin/test ${RUN_NETWORK_REQUESTS_AS_ROOT} = yes
+ExecStartPre=/usr/bin/install -d /home/buildd/filecache-default
+ExecStart=/usr/bin/twistd3 --no_save --pidfile /run/launchpad-buildd/%i.pid --python /usr/lib/launchpad-buildd/buildd.tac --logfile /var/log/launchpad-buildd/%i.log --umask 022 --nodaemon
+# If the machine runs out of memory, killing just about any other process is
+# better than killing launchpad-buildd.
+OOMScoreAdjust=-1000
+
+[Install]
+WantedBy=multi-user.target
diff --git a/debian/postinst b/debian/postinst
new file mode 100644
index 0000000..e1d93c0
--- /dev/null
+++ b/debian/postinst
@@ -0,0 +1,141 @@
+#!/bin/sh
+#
+# Copyright 2009 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+# Post install script
+
+set -e
+set -u
+
+USER=${BUILDD_USER:-buildd}
+BUILDDGID=${BUILDD_GID:-2501}
+BUILDDUID=${BUILDD_UID:-2001}
+
+make_buildd()
+{
+ /usr/share/launchpad-buildd/buildd-genconfig --name=default --host=0.0.0.0 --port=8221 --proxy-port=8222 > \
+  /etc/launchpad-buildd/default
+ echo Default buildd created.
+}
+
+case "$1" in
+    configure)
+	getent group buildd >/dev/null 2>&1 ||
+                addgroup --gid $BUILDDGID buildd
+
+	getent passwd buildd >/dev/null 2>&1 ||
+        adduser --ingroup buildd --disabled-login --gecos 'Buildd user' \
+                --uid $BUILDDUID ${USER}
+        adduser --quiet buildd sbuild
+
+	if dpkg --compare-versions "$2" lt-nl 229~; then
+	    # We used to add the buildd user to the lxd group.  This had
+	    # problems with leaking through sbuild, and it required lxd to
+	    # be installed at postinst time, which is problematic now that
+	    # lxd is typically installed as a snap, so we now rely entirely
+	    # on SupplementaryGroups=lxd in the systemd service.  Clean up
+	    # the old group membership.
+	    code=0
+	    sudo deluser --quiet buildd lxd || code=$?
+	    # According to deluser(8):
+	    #   0   Success: The action was successfully executed.
+	    #   3   There is no such group. No action was performed.
+	    #   6   The user does not belong to the specified group.  No
+	    #       action was performed.
+	    case $code in
+		0|3|6) ;;
+		*) exit "$code" ;;
+	    esac
+	fi
+
+	SUDO_VERSION=$(sudo -V | sed -n '/^Sudo version/s/.* //p')
+	if dpkg --compare-versions $SUDO_VERSION lt 1.7 ||
+	   ! grep -q '^#includedir /etc/sudoers.d' /etc/sudoers; then
+		grep -q ^${USER} /etc/sudoers ||
+		    echo "${USER}  ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
+	else
+		if ! sudo -l -U ${USER} | grep -q '(ALL) NOPASSWD: ALL' ||
+		   ! sudo -l -U ${USER} | grep -q umask_override; then
+		    cat << EOF >> /etc/sudoers.d/buildd
+Defaults:buildd umask_override
+Defaults>buildd umask_override
+
+${USER}  ALL=(ALL) NOPASSWD: ALL
+%buildd ALL=(buildd) NOPASSWD: ALL, (root) NOPASSWD: /bin/su - buildd
+%lpbuildd ALL=(ALL) NOPASSWD: ALL
+EOF
+		fi
+		chmod 440 /etc/sudoers.d/buildd
+		chown root:root /etc/sudoers.d/buildd
+	fi
+
+        install -d -o${USER} -m0755 /home/${USER}
+
+	if [ "x$2" != "x" ]; then
+		if dpkg --compare-versions "$2" lt 127; then
+			# .lp-sbuildrc is no longer used.
+			rm -f /home/${USER}/.lp-sbuildrc
+			# .sbuildrc used to be a normal file, but nowadays it's a
+			# symlink.
+			rm -f /home/${USER}/.sbuildrc
+		fi
+	fi
+
+	# Create ~buildd/.sbuildrc if needed
+	if [ ! -f /home/${USER}/.sbuildrc ]; then
+		ln -s /usr/share/launchpad-buildd/sbuildrc /home/${USER}/.sbuildrc
+	fi
+
+	# Prepare a default buildd...
+	test -e /etc/launchpad-buildd/default || make_buildd
+
+	# Create any missing directories and chown them appropriately
+	test -d /home/${USER}/filecache-default || mkdir /home/${USER}/filecache-default
+	chown $USER:buildd /home/${USER}/filecache-default
+
+	chown $USER:buildd /var/log/launchpad-buildd
+
+	# Check for the presence of the /etc/source-dependencies file
+	# which sbuild will rant about the absence of...
+	test -e /etc/source-dependencies || touch /etc/source-dependencies
+
+	# Now check if we're upgrading a previous version...
+	if [ "x$2" != "x" ]; then
+	    for CONFIG in $(ls /etc/launchpad-buildd/* \
+	                  | grep -v "^-" | grep -v "~$"); do
+		/usr/share/launchpad-buildd/upgrade-config $2 $CONFIG
+	    done
+	    if dpkg --compare-versions "$2" lt 124; then
+		# we used to ship /etc/sbuild.conf, but we moved it to
+		# /usr/share and, frankly, don't care if you modified it
+		rm -f /etc/sbuild.conf
+	    fi
+	fi
+
+	# Configure systemd-timesyncd to use the buildd NTP service
+	if which systemd >/dev/null 2>&1; then
+	    eval `grep ntphost /etc/launchpad-buildd/default | sed 's/ //g'`
+	    if [ "${ntphost-}" ]; then
+		mkdir -p /etc/systemd/timesyncd.conf.d
+		cat << EOF > /etc/systemd/timesyncd.conf.d/00-launchpad-buildd.conf
+[Time]
+NTP=$ntphost
+EOF
+	    fi
+	fi
+
+	;;
+    abort-upgrade|abort-remove|abort-deconfigure)
+
+    ;;
+
+    *)
+        echo "postinst called with unknown argument \`$1'" >&2
+        exit 1
+    ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/postrm b/debian/postrm
new file mode 100644
index 0000000..a331fa2
--- /dev/null
+++ b/debian/postrm
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+set -e
+
+if [ "$1" = purge ]; then
+	rm -f /etc/launchpad-buildd/*
+	rm -f /etc/systemd/timesyncd.conf.d/00-launchpad-buildd.conf
+	if [ -d /etc/systemd/timesyncd.conf.d ]; then
+		rmdir -p --ignore-fail-on-non-empty /etc/systemd/timesyncd.conf.d
+	fi
+fi
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/pydist-overrides b/debian/pydist-overrides
new file mode 100644
index 0000000..00e275e
--- /dev/null
+++ b/debian/pydist-overrides
@@ -0,0 +1 @@
+python-debian python-debian (>= 0.1.23)
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..644e7f9
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,23 @@
+#!/usr/bin/make -f
+#
+# Copyright 2009-2020 Canonical Ltd.  
+# 
+# This software is licensed under the GNU Affero General Public License version
+# 3 (see the file LICENSE).
+
+export PYBUILD_NAME := lpbuildd
+export LIBDIR := $(shell python3 -c 'import distutils.sysconfig; print(distutils.sysconfig.get_python_lib())')
+
+%:
+	dh $@ --with=python3,systemd --buildsystem=pybuild
+
+override_dh_auto_build:
+	dh_auto_build
+	python3 buildd-genconfig --template=template-buildd.conf \
+	--arch=i386 --port=8221 --name=default --host=buildd.buildd \
+		> buildd-example.conf
+
+# Required in debhelper compatibility level <=10 to avoid generating
+# postinst fragments to register a nonexistent init.d script.
+override_dh_installinit:
+	dh_installinit -n
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..d3827e7
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+1.0
diff --git a/debian/upgrade-config b/debian/upgrade-config
new file mode 100755
index 0000000..33f38cb
--- /dev/null
+++ b/debian/upgrade-config
@@ -0,0 +1,336 @@
+#!/usr/bin/python3
+#
+# Copyright 2009-2020 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Upgrade a launchpad-buildd configuration file."""
+
+try:
+    from configparser import ConfigParser as SafeConfigParser
+    from configparser import NoOptionError, NoSectionError
+except ImportError:
+    from ConfigParser import (
+        SafeConfigParser,
+        NoOptionError,
+        NoSectionError,
+    )
+
+import os
+import re
+import subprocess
+import sys
+
+import apt_pkg
+
+apt_pkg.init()
+
+(old_version, conf_file) = sys.argv[1:]
+
+bin_path = "/usr/share/launchpad-buildd/slavebin"
+
+
+def upgrade_to_12():
+    print("Upgrading %s to version 12" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev12~"])
+    in_file = open(conf_file + "-prev12~")
+    out_file = open(conf_file, "w")
+    for line in in_file:
+        if line.startswith("[debianmanager]"):
+            line += "ogrepath = %s/apply-ogre-model\n" % bin_path
+        if line.startswith("sbuildargs"):
+            line = line.replace("-A", "")
+        out_file.write(line)
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_34():
+    print("Upgrading %s to version 34" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev34~"])
+    in_file = open(conf_file + "-prev34~")
+    out_file = open(conf_file, "w")
+    for line in in_file:
+        if line.startswith("[debianmanager]"):
+            line += "sourcespath = %s/override-sources-list\n" % bin_path
+        out_file.write(line)
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_39():
+    print("Upgrading %s to version 39" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev39~"])
+    in_file = open(conf_file + "-prev39~")
+    out_file = open(conf_file, "w")
+    for line in in_file:
+        if line.startswith("sbuildargs"):
+            line = line.replace("-dautobuild ", "")
+        if line.startswith("[slave]"):
+            line += "ntphost = ntp.buildd\n"
+        out_file.write(line)
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_57():
+    print("Upgrading %s to version 57" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev57~"])
+    in_file = open(conf_file + "-prev57~")
+    out_file = open(conf_file, "w")
+    # We want to move all the sbuild lines to a new
+    # 'binarypackagemanager' section at the end.
+    binarypackage_lines = []
+    for line in in_file:
+        if line.startswith("sbuild"):
+            binarypackage_lines.append(line)
+        else:
+            out_file.write(line)
+    out_file.write("[binarypackagemanager]\n")
+    for line in binarypackage_lines:
+        out_file.write(line)
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_58():
+    print("Upgrading %s to version 58" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev58~"])
+    in_file = open(conf_file + "-prev58~")
+    out_file = open(conf_file, "w")
+    out_file.write(in_file.read())
+    out_file.write(
+        "\n[sourcepackagerecipemanager]\n"
+        "buildrecipepath = %s/buildrecipe\n" % bin_path
+    )
+
+
+def upgrade_to_59():
+    print("Upgrading %s to version 59" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev59~"])
+    in_file = open(conf_file + "-prev59~")
+    out_file = open(conf_file, "w")
+    out_file.write(in_file.read())
+    out_file.write(
+        "\n[translationtemplatesmanager]\n"
+        "generatepath = %s/generate-translation-templates\n"
+        "resultarchive = translation-templates.tar.gz\n" % bin_path
+    )
+
+
+def upgrade_to_63():
+    print("Upgrading %s to version 63" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev63~"])
+    in_file = open(conf_file + "-prev63~")
+    out_file = open(conf_file, "w")
+    for line in in_file:
+        if not line.startswith("ogrepath"):
+            out_file.write(line)
+
+
+def upgrade_to_110():
+    print("Upgrading %s to version 110" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev110~"])
+    in_file = open(conf_file + "-prev110~")
+    out_file = open(conf_file, "w")
+    for line in in_file:
+        if line.startswith("[allmanagers]"):
+            line += "preppath = %s/slave-prep\n" % bin_path
+        out_file.write(line)
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_115():
+    print("Upgrading %s to version 115" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev115~"])
+    in_allmanagers = False
+    in_file = open(conf_file + "-prev115~")
+    out_file = open(conf_file, "w")
+    for line in in_file:
+        if line.startswith("[allmanagers]"):
+            in_allmanagers = True
+        elif in_allmanagers and (line.startswith("[") or not line.strip()):
+            out_file.write(
+                "processscanpath = %s/scan-for-processes\n" % bin_path
+            )
+            in_allmanagers = False
+        if not line.startswith("processscanpath = "):
+            out_file.write(line)
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_120():
+    print("Upgrading %s to version 120" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev120~"])
+    in_file = open(conf_file + "-prev120~")
+    out_file = open(conf_file, "w")
+    out_file.write(in_file.read())
+    out_file.write(
+        "\n[livefilesystemmanager]\n"
+        "buildlivefspath = %s/buildlivefs\n" % bin_path
+    )
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_126():
+    print("Upgrading %s to version 126" % conf_file)
+    subprocess.call(["mv", conf_file, conf_file + "-prev126~"])
+    in_file = open(conf_file + "-prev126~")
+    out_file = open(conf_file, "w")
+    archive_ubuntu = " --archive=ubuntu"
+    for line in in_file:
+        line = line.rstrip("\n")
+        if line.endswith(archive_ubuntu):
+            line = line[: -len(archive_ubuntu)]
+        out_file.write(line + "\n")
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_127():
+    print("Upgrading %s to version 127" % conf_file)
+    os.rename(conf_file, conf_file + "-prev127~")
+
+    in_file = open(conf_file + "-prev127~")
+    out_file = open(conf_file, "w")
+    obsolete_prefixes = [
+        "[allmanagers]",
+        "[debianmanager]",
+        "[binarypackagemanager]",
+        "[sourcepackagerecipemanager]",
+        "[livefilesystemmanager]",
+        "preppath ",
+        "unpackpath ",
+        "cleanpath ",
+        "mountpath ",
+        "umountpath ",
+        "processscanpath ",
+        "updatepath ",
+        "sourcespath ",
+        "sbuildpath ",
+        "sbuildargs ",
+        "buildrecipepath ",
+        "generatepath ",
+        "buildlivefspath ",
+    ]
+    wrote_blank = False
+    for line in in_file:
+        # Remove obsolete paths and sections.
+        if any(line.startswith(p) for p in obsolete_prefixes):
+            continue
+        # Squash any sequences of blank lines into a single one.
+        if not line.strip():
+            if wrote_blank:
+                continue
+            wrote_blank = True
+        else:
+            wrote_blank = False
+        out_file.write(line)
+        # Add single new sharepath to the end of the slave section.
+        if line.startswith("ntphost "):
+            out_file.write("sharepath = /usr/share/launchpad-buildd\n")
+    in_file.close()
+    out_file.close()
+
+
+def upgrade_to_162():
+    print("Upgrading %s to version 162" % conf_file)
+    os.rename(conf_file, conf_file + "-prev162~")
+
+    with open(conf_file + "-prev162~") as in_file:
+        with open(conf_file, "w") as out_file:
+            out_file.write(in_file.read())
+            out_file.write("\n[snapmanager]\n" "proxyport = 8222\n")
+
+
+def upgrade_to_190():
+    print("Upgrading %s to version 190" % conf_file)
+    os.rename(conf_file, conf_file + "-prev190~")
+
+    with open(conf_file + "-prev190~") as in_file:
+        with open(conf_file, "w") as out_file:
+            for line in in_file:
+                if line.strip() == "[slave]":
+                    line = "[builder]\n"
+                out_file.write(line)
+
+
+def upgrade_to_200():
+    print("Upgrading %s to version 200" % conf_file)
+
+    # We need to move snapmanager.proxyport to builder.proxyport, so start
+    # by parsing the existing file to find the current value.
+    conf = SafeConfigParser()
+    conf.read(conf_file)
+    try:
+        proxyport = conf.get("snapmanager", "proxyport")
+    except (NoOptionError, NoSectionError):
+        proxyport = None
+
+    os.rename(conf_file, conf_file + "-prev200~")
+    with open(conf_file + "-prev200~") as in_file:
+        with open(conf_file, "w") as out_file:
+            in_builder = False
+            in_snapmanager = False
+            wrote_blank = False
+            for line in in_file:
+                if line.startswith("[builder]"):
+                    in_builder = True
+                elif in_builder and (line.startswith("[") or not line.strip()):
+                    if proxyport is not None:
+                        out_file.write("proxyport = %s\n" % proxyport)
+                    in_builder = False
+                elif line.startswith("[snapmanager]"):
+                    # Delete this section.
+                    in_snapmanager = True
+
+                if not line.strip():
+                    wrote_blank = True
+                elif not in_snapmanager:
+                    if wrote_blank:
+                        out_file.write("\n")
+                    out_file.write(line)
+                    wrote_blank = False
+
+                if (
+                    in_snapmanager
+                    and not line.startswith("[snapmanager]")
+                    and (line.startswith("[") or not line.strip())
+                ):
+                    in_snapmanager = False
+
+
+if __name__ == "__main__":
+    old_version = re.sub(r"[~-].*", "", old_version)
+    if apt_pkg.version_compare(old_version, "12") < 0:
+        upgrade_to_12()
+    if apt_pkg.version_compare(old_version, "34") < 0:
+        upgrade_to_34()
+    if apt_pkg.version_compare(old_version, "39") < 0:
+        upgrade_to_39()
+    if apt_pkg.version_compare(old_version, "57") < 0:
+        upgrade_to_57()
+    if apt_pkg.version_compare(old_version, "58") < 0:
+        upgrade_to_58()
+    if apt_pkg.version_compare(old_version, "59") < 0:
+        upgrade_to_59()
+    if apt_pkg.version_compare(old_version, "63") < 0:
+        upgrade_to_63()
+    if apt_pkg.version_compare(old_version, "110") < 0:
+        upgrade_to_110()
+    if apt_pkg.version_compare(old_version, "115") < 0:
+        upgrade_to_115()
+    if apt_pkg.version_compare(old_version, "120") < 0:
+        upgrade_to_120()
+    if apt_pkg.version_compare(old_version, "126") < 0:
+        upgrade_to_126()
+    if apt_pkg.version_compare(old_version, "127") < 0:
+        upgrade_to_127()
+    if apt_pkg.version_compare(old_version, "162") < 0:
+        upgrade_to_162()
+    if apt_pkg.version_compare(old_version, "190") < 0:
+        upgrade_to_190()
+    if apt_pkg.version_compare(old_version, "200") < 0:
+        upgrade_to_200()
diff --git a/default/launchpad-buildd b/default/launchpad-buildd
new file mode 100644
index 0000000..a149478
--- /dev/null
+++ b/default/launchpad-buildd
@@ -0,0 +1,11 @@
+# launchpad-buildd is disabled.
+# When enabled, launchpad-buildd accepts network commands and runs them as root.
+# If you are sure this server will only be reachable by trusted machines, edit
+# /etc/default/launchpad-buildd to enable it.
+
+# When 'yes', this machine will accept tcp connections and run arbitrary jobs
+# as root.  Do not enable this except on dedicated machines on a tightly
+# controlled network.
+#
+# When 'no', the buildd will not run at all.
+RUN_NETWORK_REQUESTS_AS_ROOT=no
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..c81ae4d
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,64 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = "launchpad-buildd"
+copyright = "2009-2022, Canonical Ltd"
+author = "Launchpad developers"
+
+# The full version, including alpha/beta/rc tags
+release = "236"
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = "alabaster"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = []
+
+# This is required for the alabaster theme
+# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
+html_sidebars = {
+    "**": [
+        "globaltoc.html",
+        "relations.html",  # needs 'show_related': True theme option to display
+        "searchbox.html",
+    ]
+}
diff --git a/docs/explanation/deployment.rst b/docs/explanation/deployment.rst
new file mode 100644
index 0000000..aa07417
--- /dev/null
+++ b/docs/explanation/deployment.rst
@@ -0,0 +1,30 @@
+===========================
+Production deployment notes
+===========================
+
+In Launchpad's production build farm, launchpad-buildd is deployed via base
+virtual machine images constructed by taking standard Ubuntu cloud images
+and installing launchpad-buildd in them from
+https://launchpad.net/~canonical-is-sa/+archive/ubuntu/buildd.  This is done
+by
+https://code.launchpad.net/~canonical-sysadmins/canonical-is-charms/launchpad-buildd-image-modifier
+(currently private, sorry).
+
+We deliberately run builders in virtual machines rather than containers
+for the following reasons:
+
+- avoiding issues with nested containerization
+- containers are not secure enough against being escaped by malicious code
+
+------------------
+Additional context
+------------------
+
+Charm recipe builds, `Launchpad CI`_, live filesystem builds, OCI recipe
+builds, and snap recipe builds all build in LXD containers.
+Everything else builds in chroots.
+
+.. _Launchpad CI: https://help.launchpad.net/Code/ContinuousIntegration
+
+Please note that the LXD containers do not share basic system directories with
+the host.
diff --git a/docs/explanation/malware-scanning.rst b/docs/explanation/malware-scanning.rst
new file mode 100644
index 0000000..dfbecdc
--- /dev/null
+++ b/docs/explanation/malware-scanning.rst
@@ -0,0 +1,31 @@
+Malware scanning
+****************
+
+Certain CI builds can be configured with ClamAV integration, so that builds
+have a basic malware scan performed on their output files.  This is not yet
+very generalized (it currently only works for builds in the private ``soss``
+distribution), and should not be expected to be robust.
+
+To enable this in a local Launchpad installation, set this in
+``launchpad-lazr.conf`` (or otherwise arrange for ``"scan_malware": true``
+to be included in the arguments dispatched to the builder)::
+
+    [cibuild.soss]
+    scan_malware: True
+
+``database.clamav.net`` rate-limits clients.  To avoid this, and generally
+to be good citizens, we maintain a `private mirror
+<https://docs.clamav.net/appendix/CvdPrivateMirror.html>`_ of the ClamAV
+database.  This is organized using the `clamav-database-mirror
+<https://charmhub.io/clamav-database-mirror>`_ charm, deployed via the
+`vbuilder
+<https://git.launchpad.net/~launchpad/launchpad-mojo-specs/+git/private/tree/vbuilder?h=vbuilder>`_
+Mojo spec (Canonical-internal); on production, this is exposed to builders
+as ``clamav-database-mirror.lp.internal``.  `launchpad-buildd-image-modifier
+<https://git.launchpad.net/charm-launchpad-buildd-image-modifier>`_ is
+configured to pass a suitable local URL on to ``launchpad-buildd``, but you
+can also do this in a local installation by adding something like the
+following to ``/etc/launchpad-buildd/default``::
+
+    [proxy]
+    clamavdatabase = http://clamav-database-mirror.test/
diff --git a/docs/how-to/building.rst b/docs/how-to/building.rst
new file mode 100644
index 0000000..280b2b7
--- /dev/null
+++ b/docs/how-to/building.rst
@@ -0,0 +1,22 @@
+How to build the project
+************************
+
+In order to build the package you need ``dpkg-dev`` and ``fakeroot``.
+
+To build the package, do:
+
+.. code:: bash
+
+    debian/rules package
+    dpkg-buildpackage -rfakeroot -b
+
+It will "fail" because the package is built in the "wrong" place.
+Don't worry about that.
+
+To clean up, do:
+
+.. code:: bash
+
+    fakeroot debian/rules clean
+    rm launchpad-buildd*deb
+    rm ../launchpad-buildd*changes
diff --git a/docs/how-to/changelog_entry.rst b/docs/how-to/changelog_entry.rst
new file mode 100644
index 0000000..ac00da1
--- /dev/null
+++ b/docs/how-to/changelog_entry.rst
@@ -0,0 +1,20 @@
+
+How to create a changelog entry
+*******************************
+
+Prerequisites
+-------------
+
+.. code:: bash
+
+    sudo apt install devscripts
+
+Create the changelog entry
+--------------------------
+
+In order to create a changelog entry in ``debian/changelog``,
+you need to run the following command:
+
+.. code:: bash
+
+    dch -U
diff --git a/docs/how-to/deployment.rst b/docs/how-to/deployment.rst
new file mode 100644
index 0000000..896fcba
--- /dev/null
+++ b/docs/how-to/deployment.rst
@@ -0,0 +1,112 @@
+How to deploy launchpad-buildd
+******************************
+
+In Canonical's datacentre environments, launchpad-buildd is deployed as a
+``.deb`` package installed in a fleet of VMs.  To upgrade it, we need to
+rebuild the VM images.
+
+Each environment uses its own PPA and management environment:
+
++---------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+| Environment                                             | PPA and management environment                                                                                     |
++=========================================================+====================================================================================================================+
+| `production <https://launchpad.net/builders>`_          | `ppa:launchpad/ubuntu/buildd <https://launchpad.net/~launchpad/+archive/ubuntu/buildd/+packages>`_                 |
+|                                                         | ``prod-launchpad-vbuilders@is-bastion-ps5``                                                                        |
++---------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+| `qastaging <https://qastaging.launchpad.net/builders>`_ | `ppa:launchpad/ubuntu/buildd-staging <https://launchpad.net/~launchpad/+archive/ubuntu/buildd-staging/+packages>`_ |
+|                                                         | ``stg-vbuilder-qastaging@launchpad-bastion-ps5``                                                                   |
++---------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------+
+
+These instructions use various tools from `ubuntu-archive-tools
+<https://git.launchpad.net/ubuntu-archive-tools>`_ (``copy-package`` and
+``manage-builders``).
+
+Testing on qastaging
+--------------------
+
+#. Ensure everything has been merged to master.
+
+#. Check that the `recipe
+   <https://code.launchpad.net/~launchpad/+recipe/launchpad-buildd-daily>`_
+   has built successfully (you can start a build manually if required), and
+   that the resulting package has been published in the `Launchpad PPA
+   <https://launchpad.net/~launchpad/+archive/ubuntu/ppa/+packages>`_.
+
+#. Run ``copy-package --from=ppa:launchpad/ubuntu/ppa --suite=jammy
+   --to=ppa:launchpad/ubuntu/buildd-staging -b launchpad-buildd``
+   (from ``ubuntu-archive-tools``) to copy the current version of launchpad-buildd
+   to the deployment PPA (``jammy`` here refers to the series being used on
+   the builder instances).
+
+   For example, in qastaging, we have builders in ``focal`` and in ``jammy``,
+   so you should run the command for both series.
+
+#. `Wait for PPA publishing to complete
+   <https://launchpad.net/~launchpad/+archive/ubuntu/buildd-staging/+packages>`__.
+
+#. Run ``mojo run -m manifest-rebuild-images`` in the management environment
+   (``stg-vbuilder-qastaging@launchpad-bastion-ps5``) to start rebuilding images.
+   After a minute or so, ``juju status glance-simplestreams-sync-\*`` will
+   show "Synchronising images"; once this says "Sync completed", images have
+   been rebuilt.
+
+#. Builders will get the new image after they finish their next build (or
+   are disabled) and go through being reset.  Since qastaging's build farm
+   is typically mostly idle, you can use ``manage-builders -l qastaging
+   --reset`` to reset all builders and force them to pick up the new image
+   (from ``ubuntu-archive-tools``).
+
+#. Perform QA on qastaging until satisfied, see :doc:`/how-to/qa`.
+
+Releasing to production
+-----------------------
+
+#. Create a new release branch, e.g. ``release-213``, based on master.
+
+#. Run ``DEBEMAIL="<email address>" DEBFULLNAME="<name>" dch -rD focal``.
+   The later recipe build will prepend the correct preamble for each Ubuntu release.
+
+#. Create a commit with a title like ``releasing package launchpad-buildd version 213``,
+   push this branch and open a merge proposal with a title like
+   ``Release version 213`` for review.
+
+#. Once the release branch has merged to master,
+   tag the release commit (e.g. ``git tag 213 && git push origin 213``).
+
+#. Check that the `recipe
+   <https://code.launchpad.net/~launchpad/+recipe/launchpad-buildd-daily>`_
+   has built successfully (you can start a build manually if required), and
+   that the resulting package has been published in the `Launchpad PPA
+   <https://launchpad.net/~launchpad/+archive/ubuntu/ppa/+packages>`_.
+
+#. Run ``copy-package --from=ppa:launchpad/ubuntu/ppa --suite=focal
+   --to=ppa:launchpad/ubuntu/buildd -b launchpad-buildd`` to copy the
+   current version of launchpad-buildd to the deployment PPA.
+
+#. `Wait for PPA publishing to complete
+   <https://launchpad.net/~launchpad/+archive/ubuntu/buildd/+packages>`__.
+
+#. Run ``ssh prod-launchpad-vbuilders@is-bastion-ps5.internal
+   /home/prod-launchpad-vbuilders/scripts/rebuild-images.sh`` from the
+   staging management environment (``stg-vbuilder@launchpad-bastion-ps5``)
+   to start rebuilding images.
+
+#. Once the new image is rebuilt, which normally takes on the order of 15-60
+   minutes depending on the architecture, builders will get the new image
+   after they finish their next build (or are disabled) and go through being
+   reset.  As a result, ``manage-builders -v`` should start showing the new
+   version over time.
+
+#. Wait for the new version to appear for at least one builder in each
+   region and architecture.  If this doesn't happen after 90 minutes, then
+   ask IS for assistance in investigating; they can start by checking ``juju
+   status`` in ``prod-launchpad-vbuilders@is-bastion-ps5.internal``.
+
+#. Once the updated version is visible for at least one builder in each
+   region and architecture, `build farm administrators
+   <https://launchpad.net/~launchpad-buildd-admins/+members>`_ can use
+   ``manage-builders --virt --idle --builder-version=<old-version> --reset``
+   to reset idle builders, thereby causing builders that haven't taken any
+   builds recently to catch up.
+
+#. Close any bugs fixed by the new release.
diff --git a/docs/how-to/developing.rst b/docs/how-to/developing.rst
new file mode 100644
index 0000000..ebb568c
--- /dev/null
+++ b/docs/how-to/developing.rst
@@ -0,0 +1,32 @@
+How to set up a development environment
+***************************************
+
+First of all, it is recommended that you create an lxc container, since the
+following steps will make changes in your system. And since some build types
+will only work with virtualized containers, creating an lxc vm is the best way
+to go. If you just want to run the test suite, creating a container is
+sufficient.
+
+You can create a container with the following command:
+
+.. code:: bash
+
+        lxc launch --vm ubuntu:20.04 lp-builddev
+
+Note that you may want to have a profile to share the source code with the
+container before running the above command.
+
+Then, inside the container, install the necessary dependencies:
+
+.. code:: bash
+
+        sudo add-apt-repository ppa:launchpad/ppa
+        sudo apt-get update
+        cat system-dependencies.txt | sudo xargs apt-get install -y
+
+This should be enough for you to be able to run the test suite via ``make
+check``.
+
+More information on how to integrate it with Launchpad can be found here:
+https://dev.launchpad.net/Soyuz/HowToDevelopWithBuildd
+
diff --git a/docs/how-to/qa.rst b/docs/how-to/qa.rst
new file mode 100644
index 0000000..e226d9a
--- /dev/null
+++ b/docs/how-to/qa.rst
@@ -0,0 +1,11 @@
+How to perform QA
+*****************
+
+Depending on the applied changes,
+you may need to perform the following build types:
+
+1. `TranslationTemplates <https://dev.launchpad.net/Translations/GenerateTemplatesOnTestServers>`_
+2. Source Package Recipe Build
+3. OCI Image Build
+4. Snap Build
+5. Binary Package Build
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..a80b2b3
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,44 @@
+.. launchpad-buildd documentation master file, created by
+   sphinx-quickstart on Mon Feb  7 17:54:05 2022.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+launchpad-buildd
+================
+
+This project is the build daemon used by Launchpad's package-building
+infrastructure.
+
+It is licensed under the GNU Affero General Public Licence, version 3 (see the
+file LICENCE) except for some files originating elsewhere, which are marked
+with the licence that applies.
+
+See https://dev.launchpad.net/ for more context.
+
+
+.. toctree::
+   :maxdepth: 2
+
+   self
+
+How-to guides
+-------------
+
+.. toctree::
+   :maxdepth: 1
+
+   how-to/developing
+   how-to/changelog_entry
+   how-to/building
+   how-to/deployment
+   how-to/qa
+
+
+Explanation
+-----------
+
+.. toctree::
+   :maxdepth: 1
+
+   explanation/deployment
+   explanation/malware-scanning
diff --git a/lpbuildd/__init__.py b/lpbuildd/__init__.py
new file mode 100644
index 0000000..221f3c0
--- /dev/null
+++ b/lpbuildd/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
diff --git a/lpbuildd/binarypackage.py b/lpbuildd/binarypackage.py
new file mode 100644
index 0000000..5b7bc54
--- /dev/null
+++ b/lpbuildd/binarypackage.py
@@ -0,0 +1,497 @@
+# Copyright 2009-2018 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os
+import re
+import subprocess
+import tempfile
+import traceback
+from collections import defaultdict
+from textwrap import dedent
+
+import apt_pkg
+from debian.deb822 import Dsc, PkgRelation
+from debian.debian_support import Version
+
+from lpbuildd.debian import DebianBuildManager, DebianBuildState
+
+
+class SBuildExitCodes:
+    """SBUILD process result codes."""
+
+    OK = 0
+    FAILED = 1
+    ATTEMPTED = 2
+    GIVENBACK = 3
+    BUILDERFAIL = 4
+
+
+APT_MISSING_DEP_PATTERNS = [
+    r"but [^ ]* is to be installed",
+    r"but [^ ]* is installed",
+    r"but it is not installable",
+    r"but it is a virtual package",
+]
+
+
+APT_DUBIOUS_DEP_PATTERNS = [
+    r"but it is not installed",
+    r"but it is not going to be installed",
+]
+
+
+class BuildLogRegexes:
+    """Various build log regexes.
+
+    These allow performing actions based on regexes, and extracting
+    dependencies for auto dep-waits.
+    """
+
+    GIVENBACK = [
+        (r"^E: There are problems and -y was used without --force-yes"),
+    ]
+    MAYBEDEPFAIL = [
+        r"The following packages have unmet dependencies:\n"
+        r".* Depends: [^ ]*( \([^)]*\))? (%s)\n"
+        % r"|".join(APT_DUBIOUS_DEP_PATTERNS),
+    ]
+    DEPFAIL = {
+        r"The following packages have unmet dependencies:\n"
+        r".* Depends: (?P<p>[^ ]*( \([^)]*\))?) (%s)\n"
+        % r"|".join(APT_MISSING_DEP_PATTERNS): r"\g<p>",
+    }
+
+
+class DpkgArchitectureCache:
+    """Cache the results of asking questions of dpkg-architecture."""
+
+    def __init__(self):
+        self._matches = {}
+
+    def match(self, arch, wildcard):
+        if (arch, wildcard) not in self._matches:
+            command = ["dpkg-architecture", "-a%s" % arch, "-i%s" % wildcard]
+            env = dict(os.environ)
+            env.pop("DEB_HOST_ARCH", None)
+            ret = subprocess.call(command, env=env) == 0
+            self._matches[(arch, wildcard)] = ret
+        return self._matches[(arch, wildcard)]
+
+
+dpkg_architecture = DpkgArchitectureCache()
+
+
+class BinaryPackageBuildState(DebianBuildState):
+    SBUILD = "SBUILD"
+
+
+class BinaryPackageBuildManager(DebianBuildManager):
+    """Handle buildd building for a debian style binary package build"""
+
+    initial_build_state = BinaryPackageBuildState.SBUILD
+
+    def __init__(self, builder, buildid, **kwargs):
+        DebianBuildManager.__init__(self, builder, buildid, **kwargs)
+        self._sbuildpath = os.path.join(self._bin, "sbuild-package")
+
+    @property
+    def chroot_path(self):
+        return os.path.join(
+            self.home, "build-" + self._buildid, "chroot-autobuild"
+        )
+
+    @property
+    def schroot_config_path(self):
+        return os.path.join("/etc/schroot/chroot.d", "build-" + self._buildid)
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot."""
+
+        self._dscfile = None
+        for f in files:
+            if f.endswith(".dsc"):
+                self._dscfile = f
+        if self._dscfile is None:
+            raise ValueError(files)
+
+        self.archive_purpose = extra_args.get("archive_purpose")
+        self.suite = extra_args["suite"]
+        self.component = extra_args["ogrecomponent"]
+        self.arch_indep = extra_args.get("arch_indep", False)
+        self.build_debug_symbols = extra_args.get("build_debug_symbols", False)
+
+        super().initiate(files, chroot, extra_args)
+
+    def doRunBuild(self):
+        """Run the sbuild process to build the package."""
+        with tempfile.NamedTemporaryFile(mode="w") as schroot_file:
+            # Use the "plain" chroot type because we do the necessary setup
+            # and teardown ourselves: it's easier to do this the same way
+            # for all build types.
+            print(
+                dedent(
+                    f"""\
+                    [build-{self._buildid}]
+                    description=build-{self._buildid}
+                    groups=sbuild,root
+                    root-groups=sbuild,root
+                    type=plain
+                    directory={self.chroot_path}
+                    """
+                ),
+                file=schroot_file,
+                end="",
+            )
+            schroot_file.flush()
+            subprocess.check_call(
+                [
+                    "sudo",
+                    "install",
+                    "-o",
+                    "root",
+                    "-g",
+                    "root",
+                    "-m",
+                    "0644",
+                    schroot_file.name,
+                    self.schroot_config_path,
+                ]
+            )
+
+        currently_building_contents = (
+            "Package: %s\n"
+            "Component: %s\n"
+            "Suite: %s\n"
+            "Purpose: %s\n"
+            % (
+                self._dscfile.split("_")[0],
+                self.component,
+                self.suite,
+                self.archive_purpose,
+            )
+        )
+        if self.build_debug_symbols:
+            currently_building_contents += "Build-Debug-Symbols: yes\n"
+        with self.backend.open(
+            "/CurrentlyBuilding", mode="w+"
+        ) as currently_building:
+            currently_building.write(currently_building_contents)
+            os.fchmod(currently_building.fileno(), 0o644)
+
+        args = ["sbuild-package", self._buildid, self.arch_tag]
+        args.append(self.suite)
+        args.extend(["-c", "chroot:build-%s" % self._buildid])
+        args.append("--arch=" + self.arch_tag)
+        args.append("--dist=" + self.suite)
+        args.append("--nolog")
+        if self.arch_indep:
+            args.append("-A")
+        args.append(self._dscfile)
+        env = dict(os.environ)
+        if self.build_debug_symbols:
+            env.pop("DEB_BUILD_OPTIONS", None)
+        else:
+            env["DEB_BUILD_OPTIONS"] = "noautodbgsym"
+        self.runSubProcess(self._sbuildpath, args, env=env)
+
+    def getAptLists(self):
+        """Yield each of apt's Packages files in turn as a file object."""
+        apt_helper = "/usr/lib/apt/apt-helper"
+        paths = None
+        if os.path.exists(os.path.join(self.chroot_path, apt_helper[1:])):
+            try:
+                paths = subprocess.check_output(
+                    [
+                        "sudo",
+                        "chroot",
+                        self.chroot_path,
+                        "apt-get",
+                        "indextargets",
+                        "--format",
+                        "$(FILENAME)",
+                        "Created-By: Packages",
+                    ],
+                    universal_newlines=True,
+                ).splitlines()
+            except subprocess.CalledProcessError:
+                # This might be e.g. Ubuntu 14.04, where
+                # /usr/lib/apt/apt-helper exists but "apt-get indextargets"
+                # doesn't.  Fall back to reading Packages files directly.
+                pass
+        if paths is not None:
+            for path in paths:
+                helper = subprocess.Popen(
+                    [
+                        "sudo",
+                        "chroot",
+                        self.chroot_path,
+                        apt_helper,
+                        "cat-file",
+                        path,
+                    ],
+                    stdout=subprocess.PIPE,
+                )
+                try:
+                    yield helper.stdout
+                finally:
+                    helper.stdout.read()
+                    helper.wait()
+        else:
+            apt_lists = os.path.join(
+                self.chroot_path, "var", "lib", "apt", "lists"
+            )
+            for name in sorted(os.listdir(apt_lists)):
+                if name.endswith("_Packages"):
+                    path = os.path.join(apt_lists, name)
+                    with open(path, "rb") as packages_file:
+                        yield packages_file
+
+    def getAvailablePackages(self):
+        """Return the available binary packages in the chroot.
+
+        :return: A dictionary mapping package names to a set of the
+            available versions of each package.
+        """
+        available = defaultdict(set)
+        for packages_file in self.getAptLists():
+            for section in apt_pkg.TagFile(packages_file):
+                available[section["package"]].add(section["version"])
+                if "provides" in section:
+                    provides = apt_pkg.parse_depends(section["provides"])
+                    for provide in provides:
+                        # Disjunctions are currently undefined here.
+                        if len(provide) > 1:
+                            continue
+                        # Virtual packages may only provide an exact version
+                        # or none.
+                        if provide[0][1] and provide[0][2] != "=":
+                            continue
+                        available[provide[0][0]].add(
+                            provide[0][1] if provide[0][1] else None
+                        )
+        return available
+
+    def getBuildDepends(self, dscpath, arch_indep):
+        """Get the build-dependencies of a source package.
+
+        :param dscpath: The path of a .dsc file.
+        :param arch_indep: True iff we were asked to build the
+            architecture-independent parts of this source package.
+        :return: The build-dependencies, in the form returned by
+            `debian.deb822.PkgRelation.parse_relations`.
+        """
+        deps = []
+        with open(dscpath, "rb") as dscfile:
+            dsc = Dsc(dscfile)
+            fields = ["Build-Depends", "Build-Depends-Arch"]
+            if arch_indep:
+                fields.append("Build-Depends-Indep")
+            for field in fields:
+                if field in dsc:
+                    deps.extend(PkgRelation.parse_relations(dsc[field]))
+        return deps
+
+    def relationMatches(self, dep, available):
+        """Return True iff a dependency matches an available package.
+
+        :param dep: A dictionary with at least a "name" key, perhaps also
+            "version", "arch", and "restrictions" keys, and optionally other
+            keys, of the kind returned in a list of lists by
+            `debian.deb822.PkgRelation.parse_relations`.
+        :param available: A dictionary mapping package names to a list of
+            the available versions of each package.
+        """
+        dep_arch = dep.get("arch")
+        if dep_arch is not None:
+            arch_match = False
+            for enabled, arch_wildcard in dep_arch:
+                if dpkg_architecture.match(self.arch_tag, arch_wildcard):
+                    arch_match = enabled
+                    break
+                elif not enabled:
+                    # Any !other-architecture restriction implies that this
+                    # architecture is allowed, unless it's specifically
+                    # excluded by some other restriction.
+                    arch_match = True
+            if not arch_match:
+                # This dependency "matches" in the sense that it's ignored
+                # on this architecture.
+                return True
+        dep_restrictions = dep.get("restrictions")
+        if dep_restrictions is not None:
+            if all(
+                any(restriction.enabled for restriction in restrlist)
+                for restrlist in dep_restrictions
+            ):
+                # This dependency "matches" in the sense that it's ignored
+                # when no build profiles are enabled.
+                return True
+        if dep["name"] not in available:
+            return False
+        dep_version = dep.get("version")
+        if dep_version is None:
+            return True
+        operator_map = {
+            "<<": (lambda a, b: a < b),
+            "<=": (lambda a, b: a <= b),
+            "=": (lambda a, b: a == b),
+            ">=": (lambda a, b: a >= b),
+            ">>": (lambda a, b: a > b),
+        }
+        operator = operator_map[dep_version[0]]
+        want_version = dep_version[1]
+        for version in available[dep["name"]]:
+            if version is not None and operator(
+                Version(version), want_version
+            ):
+                return True
+        return False
+
+    def stripDependencies(self, deps):
+        """Return a stripped and stringified representation of a dependency.
+
+        The build master can't handle the various qualifications and
+        restrictions that may be present in control-format
+        build-dependencies (e.g. ":any", "[amd64]", or "<!nocheck>"), so we
+        strip these out before returning them.
+
+        :param deps: Build-dependencies in the form returned by
+            `debian.deb822.PkgRelation.parse_relations`.
+        :return: A stripped dependency relation string, or None if deps is
+            empty.
+        """
+        stripped_deps = []
+        for or_dep in deps:
+            stripped_or_dep = []
+            for simple_dep in or_dep:
+                stripped_simple_dep = dict(simple_dep)
+                stripped_simple_dep["arch"] = None
+                stripped_simple_dep["archqual"] = None
+                stripped_simple_dep["restrictions"] = None
+                stripped_or_dep.append(stripped_simple_dep)
+            stripped_deps.append(stripped_or_dep)
+        if stripped_deps:
+            return PkgRelation.str(stripped_deps)
+        else:
+            return None
+
+    def analyseDepWait(self, deps, avail):
+        """Work out the correct dep-wait for a failed build, if any.
+
+        We only consider direct build-dependencies, because indirect ones
+        can't easily be turned into an accurate dep-wait: they might be
+        resolved either by an intermediate package changing or by the
+        missing dependency becoming available.  We err on the side of
+        failing a build rather than setting a dep-wait if it's possible that
+        the dep-wait might be incorrect.  Any exception raised during the
+        analysis causes the build to be failed.
+
+        :param deps: The source package's build-dependencies, in the form
+            returned by `debian.deb822.PkgRelation.parse_relations`.
+        :param avail: A dictionary mapping package names to a set of the
+            available versions of each package.
+        :return: A dependency relation string representing the packages that
+            need to become available before this build can proceed, or None
+            if the build should be failed instead.
+        """
+        try:
+            unsat_deps = []
+            for or_dep in deps:
+                # Keep only the disjunctions where no alternative matches
+                # any available package version.
+                if not any(self.relationMatches(dep, avail) for dep in or_dep):
+                    unsat_deps.append(or_dep)
+            return self.stripDependencies(unsat_deps)
+        except Exception:
+            # We can't trust a dep-wait computed from a failed analysis;
+            # log the traceback and tell the caller to fail the build.
+            self._builder.log("Failed to analyse dep-wait:\n")
+            for line in traceback.format_exc().splitlines(True):
+                self._builder.log(line)
+            return None
+
+    def iterate_SBUILD(self, success):
+        """Finished the sbuild run.
+
+        :param success: the sbuild exit code; compared against the
+            `SBuildExitCodes` constants to decide the build status
+            (OK, DEPFAIL, GIVENBACK, PACKAGEFAIL or BUILDERFAIL).
+        """
+        if success == SBuildExitCodes.OK:
+            print("Returning build status: OK")
+            return self.deferGatherResults()
+
+        log_patterns = []
+        # Stop scanning the log once we reach the toolchain version dump.
+        stop_patterns = [[r"^Toolchain package versions:", re.M]]
+
+        # We don't distinguish attempted and failed.
+        if success == SBuildExitCodes.ATTEMPTED:
+            success = SBuildExitCodes.FAILED
+
+        if success == SBuildExitCodes.GIVENBACK:
+            for rx in BuildLogRegexes.GIVENBACK:
+                log_patterns.append([rx, re.M])
+            # Check the last 4KiB for the Fail-Stage. If it failed
+            # during install-deps, search for the missing dependency
+            # string.
+            with open(os.path.join(self._cachepath, "buildlog"), "rb") as log:
+                try:
+                    log.seek(-4096, os.SEEK_END)
+                except OSError:
+                    # The log is shorter than 4KiB; read from the start.
+                    pass
+                tail = log.read(4096).decode("UTF-8", "replace")
+            if re.search(r"^Fail-Stage: install-deps$", tail, re.M):
+                for rx in BuildLogRegexes.MAYBEDEPFAIL:
+                    log_patterns.append([rx, re.M | re.S])
+                for rx in BuildLogRegexes.DEPFAIL:
+                    log_patterns.append([rx, re.M | re.S])
+
+        missing_dep = None
+        if log_patterns:
+            rx, mo = self.searchLogContents(log_patterns, stop_patterns)
+            if mo is None:
+                # It was givenback, but we can't see a valid reason.
+                # Assume it failed.
+                success = SBuildExitCodes.FAILED
+            elif rx in BuildLogRegexes.MAYBEDEPFAIL:
+                # These matches need further analysis.
+                dscpath = os.path.join(
+                    self.home, "build-%s" % self._buildid, self._dscfile
+                )
+                missing_dep = self.analyseDepWait(
+                    self.getBuildDepends(dscpath, self.arch_indep),
+                    self.getAvailablePackages(),
+                )
+                if missing_dep is None:
+                    success = SBuildExitCodes.FAILED
+            elif rx in BuildLogRegexes.DEPFAIL:
+                # A depwait match forces depwait.
+                missing_dep = mo.expand(
+                    BuildLogRegexes.DEPFAIL[rx].encode("UTF-8")
+                )
+                missing_dep = self.stripDependencies(
+                    PkgRelation.parse_relations(
+                        missing_dep.decode("UTF-8", "replace")
+                    )
+                )
+            else:
+                # Otherwise it was a givenback pattern, so leave it
+                # in givenback.
+                pass
+
+        if not self.alreadyfailed:
+            if missing_dep is not None:
+                print("Returning build status: DEPFAIL")
+                print("Dependencies: " + missing_dep)
+                self._builder.depFail(missing_dep)
+            elif success == SBuildExitCodes.GIVENBACK:
+                print("Returning build status: GIVENBACK")
+                self._builder.giveBack()
+            elif success == SBuildExitCodes.FAILED:
+                print("Returning build status: PACKAGEFAIL")
+                self._builder.buildFail()
+            elif success >= SBuildExitCodes.BUILDERFAIL:
+                # anything else is assumed to be a buildd failure
+                print("Returning build status: BUILDERFAIL")
+                self._builder.builderFail()
+            self.alreadyfailed = True
+        self.doReapProcesses(self._state)
+
+    def iterateReap_SBUILD(self, success):
+        """Finished reaping after sbuild run."""
+        # Ignore errors from tearing down schroot configuration.
+        subprocess.call(["sudo", "rm", "-f", self.schroot_config_path])
+
+        # Advance the state machine to unmount the chroot.
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
diff --git a/lpbuildd/buildd.tac b/lpbuildd/buildd.tac
new file mode 100644
index 0000000..b2d756a
--- /dev/null
+++ b/lpbuildd/buildd.tac
@@ -0,0 +1,71 @@
+# Copyright 2009-2011 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+# XXX: dsilvers: 2005/01/21: Currently everything logged in the builder gets
+# passed through to the twistd log too. this could get dangerous/big
+
+# Prefer the Python 3 configparser; fall back to the old Python 2 name.
+try:
+    from configparser import ConfigParser as SafeConfigParser
+except ImportError:
+    from ConfigParser import SafeConfigParser
+
+import os
+
+from twisted.application import service, strports
+from twisted.scripts.twistd import ServerOptions
+from twisted.web import resource, server, static
+
+from lpbuildd.binarypackage import BinaryPackageBuildManager
+from lpbuildd.builder import XMLRPCBuilder
+from lpbuildd.charm import CharmBuildManager
+from lpbuildd.ci import CIBuildManager
+from lpbuildd.livefs import LiveFilesystemBuildManager
+from lpbuildd.log import RotatableFileLogObserver
+from lpbuildd.oci import OCIBuildManager
+from lpbuildd.snap import SnapBuildManager
+from lpbuildd.sourcepackagerecipe import SourcePackageRecipeBuildManager
+from lpbuildd.translationtemplates import TranslationTemplatesBuildManager
+
+# Parse the twistd command line so we can recover the --logfile option below.
+options = ServerOptions()
+options.parseOptions()
+
+conffile = os.environ.get("BUILDD_CONFIG", "buildd-example.conf")
+
+conf = SafeConfigParser()
+conf.read(conffile)
+builder = XMLRPCBuilder(conf)
+
+# Register one build manager per supported build type; the second argument
+# is the job-type string that selects the manager.
+builder.registerManager(BinaryPackageBuildManager, "binarypackage")
+builder.registerManager(SourcePackageRecipeBuildManager, "sourcepackagerecipe")
+builder.registerManager(
+    TranslationTemplatesBuildManager, "translation-templates"
+)
+builder.registerManager(LiveFilesystemBuildManager, "livefs")
+builder.registerManager(SnapBuildManager, "snap")
+builder.registerManager(OCIBuildManager, "oci")
+builder.registerManager(CharmBuildManager, "charm")
+builder.registerManager(CIBuildManager, "ci")
+
+application = service.Application("Builder")
+application.addComponent(
+    RotatableFileLogObserver(options.get("logfile")), ignoreClass=1
+)
+builderService = service.IServiceCollection(application)
+builder.builder.service = builderService
+
+# Serve the XML-RPC endpoint at /rpc and the file cache at /filecache.
+root = resource.Resource()
+root.putChild(b"rpc", builder)
+root.putChild(b"filecache", static.File(conf.get("builder", "filecache")))
+buildersite = server.Site(root)
+
+strports.service(
+    "tcp:%s" % builder.builder._config.get("builder", "bindport"), buildersite
+).setServiceParent(builderService)
+
+# You can interact with a running builder like this:
+# (assuming the builder is on localhost:8221)
+#
+# python3
+# from xmlrpc.client import ServerProxy
+# s = ServerProxy("http://localhost:8221/rpc";)
+# s.echo("Hello World")
diff --git a/lpbuildd/builder.py b/lpbuildd/builder.py
new file mode 100644
index 0000000..f821b34
--- /dev/null
+++ b/lpbuildd/builder.py
@@ -0,0 +1,916 @@
+# Copyright 2009, 2010 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+# Authors: Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>
+#      and Adam Conrad <adam.conrad@xxxxxxxxxxxxx>
+
+# The basic builder implementation.
+
+import hashlib
+import json
+import os
+import re
+import shutil
+import sys
+import tempfile
+from functools import partial
+from urllib.request import (
+    HTTPBasicAuthHandler,
+    HTTPPasswordMgrWithPriorAuth,
+    build_opener,
+    urlopen,
+)
+from xmlrpc.client import Binary
+
+import apt
+from twisted.internet import process, protocol
+from twisted.internet import reactor as default_reactor
+from twisted.python import log
+from twisted.web import xmlrpc
+
+from lpbuildd.target.backend import make_backend
+from lpbuildd.util import shell_escape
+
+devnull = open("/dev/null")
+
+
+def _sanitizeURLs(bytes_seq):
+    """A generator that deletes URL passwords from a bytes sequence.
+
+    This generator removes user/password data from URLs if embedded
+    in the latter as follows: scheme://user:passwd@netloc/path.
+
+    :param bytes_seq: A sequence of byte strings (that may contain URLs).
+    :return: A (sanitized) line stripped of authentication credentials.
+    """
+    # This regular expression will be used to remove authentication
+    # credentials from URLs.
+    password_re = re.compile(rb"://([^:@/]*:[^:@/]+@)(\S+)")
+    # Builder proxy passwords are UUIDs.
+    proxy_auth_re = re.compile(rb",proxyauth=[^:]+:[A-Za-z0-9-]+")
+
+    for line in bytes_seq:
+        # Replace "scheme://user:pass@rest" with "scheme://rest", then
+        # drop any ",proxyauth=user:token" fragment entirely.
+        sanitized_line = password_re.sub(rb"://\2", line)
+        sanitized_line = proxy_auth_re.sub(b"", sanitized_line)
+        yield sanitized_line
+
+
+# XXX cprov 2005-06-28:
+# RunCapture can be replaced with a call to
+#
+#   twisted.internet.utils.getProcessOutputAndValue
+#
+# when we start using Twisted 2.0.
+class RunCapture(protocol.ProcessProtocol):
+    """Run a command and capture its output to a builder's log."""
+
+    def __init__(self, builder, callback, stdin=None):
+        # builder: the Builder whose log receives stdout/stderr.
+        # callback: called with the exit code when the process ends.
+        # stdin: optional bytes written to the child before closing stdin.
+        self.builder = builder
+        self.notify = callback
+        self.stdin = stdin
+        # Pending "fail the builder" delayed call, cancelled on clean exit.
+        self.builderFailCall = None
+        # When True, processEnded is a no-op (the manager disowned us).
+        self.ignore = False
+
+    def connectionMade(self):
+        """Write any stdin data."""
+        if self.stdin is not None:
+            self.transport.write(self.stdin)
+            self.transport.closeStdin()
+
+    def outReceived(self, data):
+        """Pass on stdout data to the log."""
+        self.builder.log(data)
+
+    def errReceived(self, data):
+        """Pass on stderr data to the log.
+
+        With a bit of luck we won't interleave horribly."""
+        self.builder.log(data)
+
+    def processEnded(self, statusobject):
+        """This method is called when a child process got terminated.
+
+        Two actions are required at this point: eliminate pending calls to
+        "builderFail", and invoke the programmed notification callback.  The
+        notification callback must be invoked last.
+        """
+        if self.ignore:
+            # The build manager no longer cares about this process.
+            return
+
+        # Since the process terminated, we don't need to fail the builder.
+        if self.builderFailCall and self.builderFailCall.active():
+            self.builderFailCall.cancel()
+
+        # notify the builder, it'll perform the required actions
+        if self.notify is not None:
+            self.notify(statusobject.value.exitCode)
+
+
+def get_build_path(home, build_id, *extra):
+    """Generate a path within the build directory.
+
+    :param home: the user's home directory.
+    :param build_id: the build id to use.
+    :param extra: the extra path segments within the build directory.
+    :return: the generated path.
+    """
+    # Build directories live directly under $HOME as "build-<id>".
+    return os.path.join(home, "build-" + build_id, *extra)
+
+
+class BuildManager:
+    """Build manager abstract parent."""
+
+    # Default backend used to run target commands; subclasses may override.
+    backend_name = "chroot"
+
+    def __init__(self, builder, buildid, reactor=None):
+        """Create a BuildManager.
+
+        :param builder: A `Builder`.
+        :param buildid: Identifying string for this build.
+        :param reactor: Optional reactor override, used by tests; defaults
+            to the global Twisted reactor.
+        """
+        object.__init__(self)
+        self._buildid = buildid
+        self._builder = builder
+        if reactor is None:
+            reactor = default_reactor
+        self._reactor = reactor
+        # Helper scripts shipped in the configured share path.
+        self._sharepath = builder._config.get("builder", "sharepath")
+        self._bin = os.path.join(self._sharepath, "bin")
+        self._preppath = os.path.join(self._bin, "builder-prep")
+        self._intargetpath = os.path.join(self._bin, "in-target")
+        self._subprocess = None
+        # States already passed to doReapProcesses, to avoid reap loops.
+        self._reaped_states = set()
+        self.is_archive_private = False
+        self.home = os.environ["HOME"]
+        # Seconds to wait for scan-for-processes before failing the builder.
+        self.abort_timeout = 120
+        self.status_path = get_build_path(self.home, self._buildid, "status")
+        self._final_extra_status = None
+
+    @property
+    def needs_sanitized_logs(self):
+        # Private-archive build logs may embed credentials in URLs.
+        return self.is_archive_private
+
+    def runSubProcess(self, command, args, iterate=None, stdin=None, env=None):
+        """Run a subprocess capturing the results in the log.
+
+        :param command: executable to spawn.
+        :param args: argv list (args[0] is the program name).
+        :param iterate: callback for the exit code; defaults to self.iterate.
+        :param stdin: optional bytes fed to the child's stdin.
+        :param env: optional environment mapping for the child.
+        """
+        if iterate is None:
+            iterate = self.iterate
+        self._subprocess = RunCapture(self._builder, iterate, stdin=stdin)
+        # Log the command line (minus argv[0]) in shell-escaped form.
+        text_args = [
+            arg.decode("UTF-8", "replace") if isinstance(arg, bytes) else arg
+            for arg in args[1:]
+        ]
+        escaped_args = " ".join(shell_escape(arg) for arg in text_args)
+        self._builder.log(f"RUN: {command} {escaped_args}\n")
+        # Child stdin comes from /dev/null unless we have data to write.
+        childfds = {
+            0: devnull.fileno() if stdin is None else "w",
+            1: "r",
+            2: "r",
+        }
+        self._reactor.spawnProcess(
+            self._subprocess,
+            command,
+            args,
+            env=env,
+            path=self.home,
+            childFDs=childfds,
+        )
+
+    def runTargetSubProcess(self, command, *args, **kwargs):
+        """Run a subprocess that operates on the target environment.
+
+        Wraps `command` in an "in-target" invocation carrying the backend,
+        series, architecture and constraint options, then delegates to
+        runSubProcess.
+        """
+        base_args = [
+            "in-target",
+            command,
+            "--backend=%s" % self.backend_name,
+            "--series=%s" % self.series,
+            "--arch=%s" % self.arch_tag,
+        ]
+        for constraint in self.constraints:
+            base_args.append("--constraint=%s" % constraint)
+        base_args.append(self._buildid)
+        self.runSubProcess(
+            self._intargetpath, base_args + list(args), **kwargs
+        )
+
+    def doUnpack(self):
+        """Unpack the build chroot."""
+        self.runTargetSubProcess(
+            "unpack-chroot",
+            "--image-type",
+            self.image_type,
+            self._chroottarfile,
+        )
+
+    def doReapProcesses(self, state, notify=True):
+        """Reap any processes left lying around in the chroot.
+
+        :param state: the state being reaped, recorded so each state is
+            only reaped once; None skips that bookkeeping.
+        :param notify: when True, iterateReap is invoked afterwards to
+            advance the state machine.
+        """
+        if state is not None and state in self._reaped_states:
+            # We've already reaped this state.  To avoid a loop, proceed
+            # immediately to the next iterator.
+            self._builder.log("Already reaped from state %s...\n" % state)
+            if notify:
+                self.iterateReap(state, 0)
+        else:
+            if state is not None:
+                self._reaped_states.add(state)
+            if notify:
+                iterate = partial(self.iterateReap, state)
+            else:
+
+                def iterate(success):
+                    pass
+
+            self.runTargetSubProcess("scan-for-processes", iterate=iterate)
+
+    def doCleanup(self):
+        """Remove the build tree etc."""
+        # Fetch a final snapshot of manager-specific extra status.
+        self._final_extra_status = self.status()
+
+        if not self.fast_cleanup:
+            self.runTargetSubProcess("remove-build")
+
+        # Sanitize the URLs in the buildlog file if this is a build
+        # in a private archive.
+        if self.needs_sanitized_logs:
+            self._builder.sanitizeBuildlog(self._builder.cachePath("buildlog"))
+
+        if self.fast_cleanup:
+            self.iterate(0, quiet=True)
+
+    def doMounting(self):
+        """Mount things in the chroot, e.g. proc."""
+        self.runTargetSubProcess("mount-chroot")
+
+    def doUnmounting(self):
+        """Unmount the chroot."""
+        if self.fast_cleanup:
+            # Fast cleanup skips the real unmount and iterates directly.
+            self.iterate(0, quiet=True)
+        else:
+            self.runTargetSubProcess("umount-chroot")
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build given the input files.
+
+        :param files: mapping of destination names to cached file names,
+            symlinked into the build directory.
+        :param chroot: cache name of the chroot/image tarball.
+        :param extra_args: per-build options dictionary.
+
+        Please note: the 'extra_args' dictionary may contain a boolean
+        value keyed under the 'archive_private' string. If that value
+        evaluates to True the build at hand is for a private archive.
+        """
+        if "build_url" in extra_args:
+            self._builder.log("%s\n" % extra_args["build_url"])
+
+        os.mkdir(get_build_path(self.home, self._buildid))
+        for f in files:
+            # Expose cached input files inside the build directory.
+            os.symlink(
+                self._builder.cachePath(files[f]),
+                get_build_path(self.home, self._buildid, f),
+            )
+        self._chroottarfile = self._builder.cachePath(chroot)
+
+        self.image_type = extra_args.get("image_type", "chroot")
+        self.series = extra_args["series"]
+        self.arch_tag = extra_args.get("arch_tag", self._builder.getArch())
+        self.fast_cleanup = extra_args.get("fast_cleanup", False)
+        self.constraints = extra_args.get("builder_constraints") or []
+
+        # Check whether this is a build in a private archive and
+        # whether the URLs in the buildlog file should be sanitized
+        # so that they do not contain any embedded authentication
+        # credentials.
+        if extra_args.get("archive_private"):
+            self.is_archive_private = True
+
+        self.backend = make_backend(
+            self.backend_name,
+            self._buildid,
+            series=self.series,
+            arch=self.arch_tag,
+            constraints=self.constraints,
+        )
+
+        self.runSubProcess(self._preppath, ["builder-prep"])
+
+    def status(self):
+        """Return extra status for this build manager, as a dictionary.
+
+        This may be used to return manager-specific information from the
+        XML-RPC status call.
+        """
+        # After cleanup started, serve the snapshot taken by doCleanup.
+        if self._final_extra_status is not None:
+            return self._final_extra_status
+        try:
+            with open(self.status_path) as status_file:
+                return json.load(status_file)
+        except OSError:
+            # No status file yet; fall through to the empty default.
+            pass
+        except Exception as e:
+            print(
+                "Error deserialising extra status file: %s" % e,
+                file=sys.stderr,
+            )
+        return {}
+
+    def iterate(self, success, quiet=False):
+        """Perform an iteration of the builder.
+
+        The BuildManager tends to work by invoking several
+        subprocesses in order. the iterate method is called by the
+        object created by runSubProcess to gather the results of the
+        sub process.
+        """
+        raise NotImplementedError(
+            "BuildManager should be subclassed to be " "used"
+        )
+
+    def iterateReap(self, state, success):
+        """Perform an iteration of the builder following subprocess reaping.
+
+        Subprocess reaping is special, typically occurring at several
+        positions in a build manager's state machine.  We therefore keep
+        track of the state being reaped so that we can select the
+        appropriate next state.
+        """
+        raise NotImplementedError(
+            "BuildManager should be subclassed to be " "used"
+        )
+
+    def abortReap(self):
+        """Abort by killing all processes in the chroot, as hard as we can.
+
+        We expect this to result in the main build process exiting non-zero
+        and giving us some useful logs.
+
+        This may be overridden in subclasses so that they can perform their
+        own state machine management.
+        """
+        self.doReapProcesses(None, notify=False)
+
+    def abort(self):
+        """Abort the build by killing the subprocess."""
+        if self.alreadyfailed or self._subprocess is None:
+            return
+        else:
+            self.alreadyfailed = True
+        primary_subprocess = self._subprocess
+        self.abortReap()
+        # In extreme cases the build may be hung too badly for
+        # scan-for-processes to manage to kill it (blocked on I/O,
+        # forkbombing test suite, etc.).  In this case, fail the builder and
+        # let an admin sort it out.
+        self._subprocess.builderFailCall = self._reactor.callLater(
+            self.abort_timeout,
+            self.builderFail,
+            "Failed to kill all processes.",
+            primary_subprocess,
+        )
+
+    def builderFail(self, reason, primary_subprocess):
+        """Mark the builder as failed.
+
+        Invoked by the delayed call set up in abort() when reaping did not
+        finish within abort_timeout seconds.
+        """
+        self._builder.log("ABORTING: %s\n" % reason)
+        self._subprocess.builderFailCall = None
+        self._builder.builderFail()
+        self.alreadyfailed = True
+        # If we failed to kill all processes in the chroot, then the primary
+        # subprocess (i.e. the one running immediately before
+        # doReapProcesses was called) may not have exited.  Kill it so that
+        # we can proceed.
+        try:
+            primary_subprocess.transport.signalProcess("KILL")
+        except process.ProcessExitedAlready:
+            self._builder.log("ABORTING: Process Exited Already\n")
+        primary_subprocess.transport.loseConnection()
+        # Leave the reaper running, but disconnect it from our state
+        # machine.  Perhaps an admin can make something of it, and in any
+        # case scan-for-processes elevates itself to root so it's awkward to
+        # kill it.
+        self._subprocess.ignore = True
+        self._subprocess.transport.loseConnection()
+
+    def addWaitingFileFromBackend(self, path, name=None):
+        # Copy a file out of the backend into a temporary directory, hand
+        # it to the builder's file cache, then clean up the temporary copy.
+        fetched_dir = tempfile.mkdtemp()
+        try:
+            fetched_path = os.path.join(fetched_dir, os.path.basename(path))
+            self.backend.copy_out(path, fetched_path)
+            self._builder.addWaitingFile(fetched_path, name=name)
+        finally:
+            shutil.rmtree(fetched_dir)
+
+
+class BuilderStatus:
+    """Status values for the builder."""
+
+    IDLE = "BuilderStatus.IDLE"
+    BUILDING = "BuilderStatus.BUILDING"
+    WAITING = "BuilderStatus.WAITING"
+    ABORTING = "BuilderStatus.ABORTING"
+
+    # Error responses for unknown checksums/build types.
+    UNKNOWNSUM = "BuilderStatus.UNKNOWNSUM"
+    UNKNOWNBUILDER = "BuilderStatus.UNKNOWNBUILDER"
+
+
+class BuildStatus:
+    """Status values for builds themselves."""
+
+    OK = "BuildStatus.OK"
+    DEPFAIL = "BuildStatus.DEPFAIL"
+    GIVENBACK = "BuildStatus.GIVENBACK"
+    PACKAGEFAIL = "BuildStatus.PACKAGEFAIL"
+    CHROOTFAIL = "BuildStatus.CHROOTFAIL"
+    BUILDERFAIL = "BuildStatus.BUILDERFAIL"
+    ABORTED = "BuildStatus.ABORTED"
+
+
+class Builder:
+    """The core of a builder."""
+
+    def __init__(self, config):
+        object.__init__(self)
+        self._config = config
+        self.builderstatus = BuilderStatus.IDLE
+        # Directory used to cache input and output files by SHA-1 name.
+        self._cachepath = self._config.get("builder", "filecache")
+        self.buildstatus = BuildStatus.OK
+        # Maps result file names to their cache SHA-1 sums.
+        self.waitingfiles = {}
+        self.builddependencies = ""
+        self._log = None
+        self.manager = None
+
+        if not os.path.isdir(self._cachepath):
+            raise ValueError("FileCache path is not a dir")
+
+    def getArch(self):
+        """Return the Architecture tag for the builder."""
+        return self._config.get("builder", "architecturetag")
+
+    def cachePath(self, file):
+        """Return the path in the cache of the file specified."""
+        return os.path.join(self._cachepath, file)
+
+    def setupAuthHandler(self, url, username, password):
+        """Set up a BasicAuthHandler to open the url.
+
+        :param url: The URL that needs authenticating.
+        :param username: The username for authentication.
+        :param password: The password for authentication.
+        :return: The OpenerDirector instance.
+
+        This helper installs an HTTPBasicAuthHandler that will deal with any
+        HTTP basic authentication required when opening the URL.
+        """
+        # is_authenticated=True sends credentials pre-emptively rather than
+        # waiting for a 401 challenge.
+        password_mgr = HTTPPasswordMgrWithPriorAuth()
+        password_mgr.add_password(
+            None, url, username, password, is_authenticated=True
+        )
+        handler = HTTPBasicAuthHandler(password_mgr)
+        opener = build_opener(handler)
+        return opener
+
+    def ensurePresent(self, sha1sum, url=None, username=None, password=None):
+        """Ensure we have the file with the checksum specified.
+
+        Optionally you can provide the librarian URL and
+        the builder will fetch the file if it doesn't have it.
+        Return a tuple containing: (<present>, <info>)
+        """
+        extra_info = "No URL"
+        cachefile = self.cachePath(sha1sum)
+        if url is not None:
+            extra_info = "Cache"
+            if not os.path.exists(cachefile):
+                self.log(f"Fetching {sha1sum} by url {url}")
+                if username or password:
+                    opener = self.setupAuthHandler(
+                        url, username, password
+                    ).open
+                else:
+                    opener = urlopen
+                try:
+                    f = opener(url)
+                # Don't change this to URLError without thoroughly
+                # testing for regressions. For now, just suppress
+                # the PyLint warnings.
+                # pylint: disable-msg=W0703
+                except Exception as info:
+                    extra_info = "Error accessing Librarian: %s" % info
+                    self.log(extra_info)
+                else:
+                    # Download to a .tmp file, verifying the SHA-1 as we go;
+                    # only rename into place if the digest matches.
+                    of = open(cachefile + ".tmp", "wb")
+                    # Upped for great justice to 256k
+                    check_sum = hashlib.sha1()
+                    for chunk in iter(lambda: f.read(256 * 1024), b""):
+                        of.write(chunk)
+                        check_sum.update(chunk)
+                    of.close()
+                    f.close()
+                    extra_info = "Download"
+                    if check_sum.hexdigest() != sha1sum:
+                        os.remove(cachefile + ".tmp")
+                        extra_info = "Digests did not match, removing again!"
+                    else:
+                        os.rename(cachefile + ".tmp", cachefile)
+                    self.log(extra_info)
+        return (os.path.exists(cachefile), extra_info)
+
+    def storeFile(self, path):
+        """Store the content of the provided path in the file cache.
+
+        :return: the SHA-1 hex digest under which the file was cached.
+        """
+        f = open(path, "rb")
+        tmppath = self.cachePath("storeFile.tmp")
+        of = open(tmppath, "wb")
+        try:
+            # Copy in 256 KiB chunks, hashing as we go.
+            sha1 = hashlib.sha1()
+            for chunk in iter(lambda: f.read(256 * 1024), b""):
+                sha1.update(chunk)
+                of.write(chunk)
+            sha1sum = sha1.hexdigest()
+        finally:
+            of.close()
+            f.close()
+        # If the cache already holds this digest, discard the copy.
+        present, info = self.ensurePresent(sha1sum)
+        if present:
+            os.unlink(tmppath)
+            return sha1sum
+        os.rename(tmppath, self.cachePath(sha1sum))
+        return sha1sum
+
+    def addWaitingFile(self, path, name=None):
+        """Add a file to the cache and store its details for reporting."""
+        if name is None:
+            name = os.path.basename(path)
+        self.waitingfiles[name] = self.storeFile(path)
+
+    def abort(self):
+        """Abort the current build."""
+        # XXX: dsilvers: 2005-01-21: Current abort mechanism doesn't wait
+        # for abort to complete. This is potentially an issue in a heavy
+        # load situation.
+        if self.builderstatus == BuilderStatus.ABORTING:
+            # This might happen if the master side restarts in the middle of
+            # an abort cycle.
+            self.log("Builder already ABORTING when asked to abort")
+            return
+        if self.builderstatus != BuilderStatus.BUILDING:
+            # XXX: Should raise a known Fault so that the client can make
+            # useful decisions about the error!
+            raise ValueError("Builder is not BUILDING when asked to abort")
+        self.manager.abort()
+        self.builderstatus = BuilderStatus.ABORTING
+
+    def clean(self):
+        """Clean up pending files and reset the internal build state."""
+        if self.builderstatus != BuilderStatus.WAITING:
+            raise ValueError("Builder is not WAITING when asked to clean")
+        # Remove cached result files (set() de-duplicates shared sums).
+        for f in set(self.waitingfiles.values()):
+            os.remove(self.cachePath(f))
+        self.builderstatus = BuilderStatus.IDLE
+        if self._log is not None:
+            self._log.close()
+            os.remove(self.cachePath("buildlog"))
+            self._log = None
+        self.waitingfiles = {}
+        self.builddependencies = ""
+        self.manager = None
+        self.buildstatus = BuildStatus.OK
+
+    def log(self, data):
+        """Write the provided data to the log.
+
+        Accepts str or bytes; the buildlog file receives UTF-8 bytes and
+        the twistd log receives text with any trailing newline removed.
+        """
+        if self._log is not None:
+            data_bytes = (
+                data if isinstance(data, bytes) else data.encode("UTF-8")
+            )
+            self._log.write(data_bytes)
+            self._log.flush()
+        data_text = (
+            data if isinstance(data, str) else data.decode("UTF-8", "replace")
+        )
+        if data_text.endswith("\n"):
+            data_text = data_text[:-1]
+        log.msg("Build log: " + data_text)
+
+    def getLogTail(self):
+        """Return the tail of the log.
+
+        If the buildlog is not yet opened for writing (self._log is None),
+        return an empty bytes object.
+
+        It safely tries to open the 'buildlog', if it doesn't exist, due to
+        job cleanup or buildlog sanitization race-conditions, it also returns
+        an empty bytes object.
+
+        When the 'buildlog' is present it returns up to 2 KiB bytes of the
+        end of the file.
+
+        The returned content will be 'sanitized', see `_sanitizeURLs` for
+        further information.
+        """
+        if self._log is None:
+            return b""
+
+        rlog = None
+        try:
+            try:
+                rlog = open(self.cachePath("buildlog"), "rb")
+            except OSError:
+                ret = b""
+            else:
+                # We rely on good OS practices that keep the file handler
+                # usable once it's opened. So, if open() is ok, a subsequent
+                # seek/tell/read will be safe.
+                rlog.seek(0, os.SEEK_END)
+                count = rlog.tell()
+                if count > 2048:
+                    count = 2048
+                rlog.seek(-count, os.SEEK_END)
+                ret = rlog.read(count)
+        finally:
+            if rlog is not None:
+                rlog.close()
+
+        if self.manager.needs_sanitized_logs:
+            # This is a build in a private archive. We need to scrub
+            # the URLs contained in the buildlog excerpt in order to
+            # avoid leaking passwords.
+            log_lines = ret.splitlines()
+
+            # Please note: we are throwing away the first line (of the
+            # excerpt to be scrubbed) because it may be cut off thus
+            # thwarting the detection of embedded passwords.
+            clean_content_iter = _sanitizeURLs(log_lines[1:])
+            ret = b"\n".join(clean_content_iter)
+
+        return ret
+
+    def startBuild(self, manager):
+        """Start a build with the provided BuildManager instance."""
+        if self.builderstatus != BuilderStatus.IDLE:
+            raise ValueError(
+                "Builder is not IDLE when asked to start building"
+            )
+        self.manager = manager
+        self.builderstatus = BuilderStatus.BUILDING
+        self.emptyLog()
+
+    def emptyLog(self):
+        """Empty the log and start again."""
+        if self._log is not None:
+            self._log.close()
+        # Truncate by reopening the buildlog in binary write mode.
+        self._log = open(self.cachePath("buildlog"), "wb")
+
+    def builderFail(self):
+        """Cease building because the builder has a problem."""
+        # BUILDERFAIL may be set while aborting as well as while building.
+        if self.builderstatus not in (
+            BuilderStatus.BUILDING,
+            BuilderStatus.ABORTING,
+        ):
+            raise ValueError(
+                "Builder is not BUILDING|ABORTING when set to BUILDERFAIL"
+            )
+        self.buildstatus = BuildStatus.BUILDERFAIL
+
+    def chrootFail(self):
+        """Cease building because the chroot could not be created or contained
+        a set of package control files which couldn't upgrade themselves, or
+        yet a lot of causes that imply the CHROOT is corrupted not the
+        package.
+        """
+        if self.builderstatus != BuilderStatus.BUILDING:
+            raise ValueError("Builder is not BUILDING when set to CHROOTFAIL")
+        self.buildstatus = BuildStatus.CHROOTFAIL
+
+    def buildFail(self):
+        """Cease building because the package failed to build."""
+        if self.builderstatus != BuilderStatus.BUILDING:
+            raise ValueError("Builder is not BUILDING when set to PACKAGEFAIL")
+        self.buildstatus = BuildStatus.PACKAGEFAIL
+
+    def buildOK(self):
+        """Having passed all possible failure states, mark a build as OK."""
+        if self.builderstatus != BuilderStatus.BUILDING:
+            raise ValueError("Builder is not BUILDING when set to OK")
+        self.buildstatus = BuildStatus.OK
+
+    def depFail(self, dependencies):
+        """Cease building due to a dependency issue.
+
+        :param dependencies: dependency relation string recorded for the
+            master to schedule a retry once it is satisfiable.
+        """
+        if self.builderstatus != BuilderStatus.BUILDING:
+            raise ValueError("Builder is not BUILDING when set to DEPFAIL")
+        self.buildstatus = BuildStatus.DEPFAIL
+        self.builddependencies = dependencies
+
+    def giveBack(self):
+        """Give-back package due to a transient buildd/archive issue."""
+        if self.builderstatus != BuilderStatus.BUILDING:
+            raise ValueError("Builder is not BUILDING when set to GIVENBACK")
+        self.buildstatus = BuildStatus.GIVENBACK
+
+    def buildAborted(self):
+        """Mark a build as aborted."""
+        if self.builderstatus != BuilderStatus.ABORTING:
+            raise ValueError("Builder is not ABORTING when set to ABORTED")
+        if self.buildstatus != BuildStatus.BUILDERFAIL:
+            self.buildstatus = BuildStatus.ABORTED
+
+    def buildComplete(self):
+        """Mark the build as complete and waiting interaction from the build
+        daemon master.
+        """
+        if self.builderstatus == BuilderStatus.BUILDING:
+            self.builderstatus = BuilderStatus.WAITING
+        elif self.builderstatus == BuilderStatus.ABORTING:
+            self.buildAborted()
+            self.builderstatus = BuilderStatus.WAITING
+        else:
+            raise ValueError(
+                "Builder is not BUILDING|ABORTING when told build is complete"
+            )
+
+    def sanitizeBuildlog(self, log_path):
+        """Removes passwords from buildlog URLs.
+
+        Because none of the URLs to be processed are expected to span
+        multiple lines and because build log files are potentially huge
+        they will be processed line by line.
+
+        :param log_path: The path to the buildlog file that is to be
+            sanitized.
+        :type log_path: ``str``
+        """
+        # First move the buildlog file that is to be sanitized out of
+        # the way.
+        unsanitized_path = self.cachePath(
+            os.path.basename(log_path) + ".unsanitized"
+        )
+        os.rename(log_path, unsanitized_path)
+
+        # Open the unsanitized buildlog file for reading.
+        unsanitized_file = open(unsanitized_path, "rb")
+
+        # Open the file that will hold the resulting, sanitized buildlog
+        # content for writing.
+        sanitized_file = None
+
+        try:
+            sanitized_file = open(log_path, "wb")
+
+            # Scrub the buildlog file line by line
+            clean_content_iter = _sanitizeURLs(unsanitized_file)
+            for line in clean_content_iter:
+                sanitized_file.write(line)
+        finally:
+            # We're done with scrubbing, close the file handles.
+            unsanitized_file.close()
+            if sanitized_file is not None:
+                sanitized_file.close()
+
+
+class XMLRPCBuilder(xmlrpc.XMLRPC):
+    """XMLRPC builder management interface."""
+
+    def __init__(self, config):
+        xmlrpc.XMLRPC.__init__(self, allowNone=True)
+        # The V1.0 new-style protocol introduces string-style protocol
+        # versions of the form 'MAJOR.MINOR', the protocol is '1.0' for now
+        # implying the presence of /filecache/ /filecache/buildlog and
+        # the reduced and optimised XMLRPC interface.
+        self.protocolversion = "1.0"
+        self.builder = Builder(config)
+        self._managers = {}
+        cache = apt.Cache()
+        try:
+            installed = cache["launchpad-buildd"].installed
+            self._version = installed.version if installed else None
+        except KeyError:
+            self._version = None
+        log.msg("Initialized")
+
+    def registerManager(self, managerclass, managertag):
+        self._managers[managertag] = managerclass
+
+    def xmlrpc_echo(self, *args):
+        """Echo the argument back."""
+        return args
+
+    def xmlrpc_proxy_info(self):
+        """Return the details for the proxy used by the manager."""
+        proxy_fields = ["use_fetch_service", "revocation_endpoint"]
+        return {k: getattr(self.builder.manager, k) for k in proxy_fields}
+
+    def xmlrpc_info(self):
+        """Return the protocol version and the manager methods supported."""
+        return (
+            self.protocolversion,
+            self.builder.getArch(),
+            list(self._managers),
+        )
+
+    def xmlrpc_status(self):
+        """Return the status of the build daemon, as a dictionary.
+
+        Depending on the builder status we return differing amounts of data,
+        but this always includes the builder status itself.
+        """
+        status = self.builder.builderstatus
+        statusname = status.split(".")[-1]
+        func = getattr(self, "status_" + statusname, None)
+        if func is None:
+            raise ValueError("Unknown status '%s'" % status)
+        ret = {"builder_status": status}
+        if self._version is not None:
+            ret["builder_version"] = self._version
+        ret.update(func())
+        if self.builder.manager is not None:
+            ret.update(self.builder.manager.status())
+        return ret
+
+    def status_IDLE(self):
+        """Handler for xmlrpc_status IDLE."""
+        return {}
+
+    def status_BUILDING(self):
+        """Handler for xmlrpc_status BUILDING.
+
+        Returns the build id and up to one kilobyte of log tail.
+        """
+        tail = self.builder.getLogTail()
+        return {"build_id": self.buildid, "logtail": Binary(tail)}
+
+    def status_WAITING(self):
+        """Handler for xmlrpc_status WAITING.
+
+        Returns the build id and the set of files waiting to be returned
+        unless the builder failed in which case we return the buildstatus
+        and the build id but no file set.
+        """
+        ret = {
+            "build_status": self.builder.buildstatus,
+            "build_id": self.buildid,
+        }
+        if self.builder.buildstatus in (
+            BuildStatus.OK,
+            BuildStatus.PACKAGEFAIL,
+            BuildStatus.DEPFAIL,
+        ):
+            ret["filemap"] = self.builder.waitingfiles
+            ret["dependencies"] = self.builder.builddependencies
+        return ret
+
+    def status_ABORTING(self):
+        """Handler for xmlrpc_status ABORTING.
+
+        This state means the builder is performing the ABORT command and is
+        not able to do anything else than answer its status, so returns the
+        build id only.
+        """
+        return {"build_id": self.buildid}
+
+    def xmlrpc_ensurepresent(self, sha1sum, url, username, password):
+        """Attempt to ensure the given file is present."""
+        return self.builder.ensurePresent(sha1sum, url, username, password)
+
+    def xmlrpc_abort(self):
+        """Abort the current build."""
+        self.builder.abort()
+        return BuilderStatus.ABORTING
+
+    def xmlrpc_clean(self):
+        """Clean up the waiting files and reset the builder's state."""
+        self.builder.clean()
+        return BuilderStatus.IDLE
+
+    def xmlrpc_build(self, buildid, managertag, chrootsum, filemap, args):
+        """Check if requested arguments are sane and initiate build procedure
+
+        return a tuple containing: (<builder_status>, <info>)
+
+        """
+        # check requested manager
+        if managertag not in self._managers:
+            extra_info = f"{managertag} not in {list(self._managers)!r}"
+            return (BuilderStatus.UNKNOWNBUILDER, extra_info)
+        # check requested chroot availability
+        chroot_present, info = self.builder.ensurePresent(chrootsum)
+        if not chroot_present:
+            extra_info = f"""CHROOTSUM -> {chrootsum}
+            ***** INFO *****
+            {info}
+            ****************
+            """
+            return (BuilderStatus.UNKNOWNSUM, extra_info)
+        # check requested files availability
+        for filesum in filemap.values():
+            file_present, info = self.builder.ensurePresent(filesum)
+            if not file_present:
+                extra_info = f"""FILESUM -> {filesum}
+                ***** INFO *****
+                {info}
+                ****************
+                """
+                return (BuilderStatus.UNKNOWNSUM, extra_info)
+        # check buildid sanity
+        if buildid is None or buildid == "" or buildid == 0:
+            raise ValueError(buildid)
+
+        # builder is available, buildd is non empty,
+        # filelist is consistent, chrootsum is available, let's initiate...
+        self.buildid = buildid
+        self.builder.startBuild(
+            self._managers[managertag](self.builder, buildid)
+        )
+        self.builder.manager.initiate(filemap, chrootsum, args)
+        return (BuilderStatus.BUILDING, buildid)
diff --git a/lpbuildd/charm.py b/lpbuildd/charm.py
new file mode 100644
index 0000000..c3243bf
--- /dev/null
+++ b/lpbuildd/charm.py
@@ -0,0 +1,99 @@
+# Copyright 2021 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os
+
+from lpbuildd.debian import DebianBuildManager, DebianBuildState
+from lpbuildd.proxy import BuildManagerProxyMixin
+
+RETCODE_SUCCESS = 0
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+class CharmBuildState(DebianBuildState):
+    BUILD_CHARM = "BUILD_CHARM"
+
+
+class CharmBuildManager(BuildManagerProxyMixin, DebianBuildManager):
+    """Build a charm."""
+
+    backend_name = "lxd"
+    initial_build_state = CharmBuildState.BUILD_CHARM
+
+    @property
+    def needs_sanitized_logs(self):
+        return True
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot."""
+        self.name = extra_args["name"]
+        self.branch = extra_args.get("branch")
+        self.git_repository = extra_args.get("git_repository")
+        self.git_path = extra_args.get("git_path")
+        self.build_path = extra_args.get("build_path")
+        self.channels = extra_args.get("channels", {})
+        self.proxy_url = extra_args.get("proxy_url")
+        self.revocation_endpoint = extra_args.get("revocation_endpoint")
+        self.proxy_service = None
+
+        super().initiate(files, chroot, extra_args)
+
+    def doRunBuild(self):
+        """Run the process to build the charm."""
+        args = []
+        args.extend(self.startProxy())
+        if self.revocation_endpoint:
+            args.extend(["--revocation-endpoint", self.revocation_endpoint])
+        for snap, channel in sorted(self.channels.items()):
+            args.extend(["--channel", f"{snap}={channel}"])
+        if self.branch is not None:
+            args.extend(["--branch", self.branch])
+        if self.git_repository is not None:
+            args.extend(["--git-repository", self.git_repository])
+        if self.git_path is not None:
+            args.extend(["--git-path", self.git_path])
+        if self.build_path is not None:
+            args.extend(["--build-path", self.build_path])
+        args.append(self.name)
+        self.runTargetSubProcess("build-charm", *args)
+
+    def iterate_BUILD_CHARM(self, retcode):
+        """Finished building the charm."""
+        self.stopProxy()
+        self.revokeProxyToken()
+        if retcode == RETCODE_SUCCESS:
+            print("Returning build status: OK")
+            return self.deferGatherResults()
+        elif (
+            retcode >= RETCODE_FAILURE_INSTALL
+            and retcode <= RETCODE_FAILURE_BUILD
+        ):
+            if not self.alreadyfailed:
+                self._builder.buildFail()
+                print("Returning build status: Build failed.")
+            self.alreadyfailed = True
+        else:
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+                print("Returning build status: Builder failed.")
+            self.alreadyfailed = True
+        self.doReapProcesses(self._state)
+
+    def iterateReap_BUILD_CHARM(self, retcode):
+        """Finished reaping after building the charm."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def gatherResults(self):
+        """Gather the results of the build and add them to the file cache."""
+        output_path = os.path.join("/home/buildd", self.name)
+        if self.build_path is not None:
+            output_path = os.path.join(output_path, self.build_path)
+        if self.backend.path_exists(output_path):
+            for entry in sorted(self.backend.listdir(output_path)):
+                path = os.path.join(output_path, entry)
+                if self.backend.islink(path):
+                    continue
+                if entry.endswith((".charm", ".manifest")):
+                    self.addWaitingFileFromBackend(path)
diff --git a/lpbuildd/check_implicit_pointer_functions.py b/lpbuildd/check_implicit_pointer_functions.py
new file mode 100755
index 0000000..e25b0ac
--- /dev/null
+++ b/lpbuildd/check_implicit_pointer_functions.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python3
+
+#
+# Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+#       David Mosberger <davidm@xxxxxxxxxx>
+# Copyright 2010-2020 Canonical Ltd.
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+# Scan standard input for GCC warning messages that are likely to
+# source of real 64-bit problems.  In particular, see whether there
+# are any implicitly declared functions whose return values are later
+# interpreted as pointers.  Those are almost guaranteed to cause
+# crashes.
+
+import re
+
+implicit_pattern = re.compile(
+    rb"([^:]*):(\d+):(\d+:)? warning: implicit declaration "
+    rb"of function [`']([^']*)'"
+)
+pointer_pattern = re.compile(
+    rb"([^:]*):(\d+):(\d+:)? warning: "
+    rb"("
+    rb"(assignment"
+    rb"|initialization"
+    rb"|return"
+    rb"|passing arg \d+ of `[^']*'"
+    rb"|passing arg \d+ of pointer to function"
+    rb") makes pointer from integer without a cast"
+    rb"|"
+    rb"cast to pointer from integer of different size)"
+)
+
+
+def filter_log(in_file, out_file, in_line=False):
+    last_implicit_filename = b""
+    last_implicit_linenum = -1
+    last_implicit_func = b""
+
+    errlist = []
+
+    while True:
+        line = in_file.readline()
+        if in_line:
+            out_file.write(line)
+            out_file.flush()
+        if line == b"":
+            break
+        m = implicit_pattern.match(line)
+        if m:
+            last_implicit_filename = m.group(1)
+            last_implicit_linenum = int(m.group(2))
+            last_implicit_func = m.group(4)
+        else:
+            m = pointer_pattern.match(line)
+            if m:
+                pointer_filename = m.group(1)
+                pointer_linenum = int(m.group(2))
+                if (
+                    last_implicit_filename == pointer_filename
+                    and last_implicit_linenum == pointer_linenum
+                ):
+                    err = (
+                        b"Function `%s' implicitly converted to pointer at "
+                        b"%s:%d"
+                        % (
+                            last_implicit_func,
+                            last_implicit_filename,
+                            last_implicit_linenum,
+                        )
+                    )
+                    errlist.append(err)
+                    out_file.write(err + b"\n")
+
+    if errlist:
+        if in_line:
+            out_file.write(b"\n".join(errlist) + b"\n\n")
+            out_file.write(
+                b"""
+
+Our automated build log filter detected the problem(s) above that will
+likely cause your package to segfault on architectures where the size of
+a pointer is greater than the size of an integer, such as ia64 and amd64.
+
+This is often due to a missing function prototype definition.
+
+Since use of implicitly converted pointers is always fatal to the application
+on ia64, they are errors.  Please correct them for your next upload.
+
+More information can be found at:
+http://wiki.debian.org/ImplicitPointerConversions
+
+    """
+            )
+    return len(errlist)
diff --git a/lpbuildd/ci.py b/lpbuildd/ci.py
new file mode 100644
index 0000000..bf762d5
--- /dev/null
+++ b/lpbuildd/ci.py
@@ -0,0 +1,282 @@
+# Copyright 2022 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os
+from configparser import NoOptionError, NoSectionError
+
+import yaml
+from twisted.internet import defer
+
+from lpbuildd.debian import DebianBuildManager, DebianBuildState
+from lpbuildd.proxy import BuildManagerProxyMixin
+
+RETCODE_SUCCESS = 0
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+# These must match the names of `RevisionStatusResult` enumeration items in
+# Launchpad.
+RESULT_SUCCEEDED = "SUCCEEDED"
+RESULT_FAILED = "FAILED"
+
+
+def _make_job_id(job_name, job_index):
+    return f"{job_name}:{job_index}"
+
+
+class CIBuildState(DebianBuildState):
+    PREPARE = "PREPARE"
+    RUN_JOB = "RUN_JOB"
+
+
+class CIBuildManager(BuildManagerProxyMixin, DebianBuildManager):
+    """Run CI jobs."""
+
+    backend_name = "lxd"
+    initial_build_state = CIBuildState.PREPARE
+
+    @property
+    def needs_sanitized_logs(self):
+        return True
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot."""
+        self.jobs = extra_args["jobs"]
+        if not self.jobs:
+            raise ValueError("Must request at least one job")
+        self.branch = extra_args.get("branch")
+        self.git_repository = extra_args.get("git_repository")
+        self.git_path = extra_args.get("git_path")
+        self.channels = extra_args.get("channels", {})
+        self.proxy_url = extra_args.get("proxy_url")
+        self.revocation_endpoint = extra_args.get("revocation_endpoint")
+        self.proxy_service = None
+        self.job_status = {}
+        self.package_repositories = extra_args.get("package_repositories")
+        self.environment_variables = extra_args.get("environment_variables")
+        self.plugin_settings = extra_args.get("plugin_settings")
+        self.secrets = extra_args.get("secrets")
+        self.scan_malware = extra_args.get("scan_malware", False)
+
+        super().initiate(files, chroot, extra_args)
+
+    def doRunBuild(self):
+        """Start running CI jobs."""
+        self.proxy_args = self.startProxy()
+        if self.revocation_endpoint:
+            self.proxy_args.extend(
+                ["--revocation-endpoint", self.revocation_endpoint]
+            )
+        args = list(self.proxy_args)
+        for snap, channel in sorted(self.channels.items()):
+            args.extend(["--channel", f"{snap}={channel}"])
+        if self.branch is not None:
+            args.extend(["--branch", self.branch])
+        if self.git_repository is not None:
+            args.extend(["--git-repository", self.git_repository])
+        if self.git_path is not None:
+            args.extend(["--git-path", self.git_path])
+        if self.scan_malware:
+            args.append("--scan-malware")
+        try:
+            # Not precisely a proxy, but it's similar in the sense of
+            # providing additional network endpoints that we use instead of
+            # the default behaviour, and using a section that doesn't exist
+            # in the default configuration is convenient for our production
+            # deployments.
+            clamav_database_url = self._builder._config.get(
+                "proxy", "clamavdatabase"
+            )
+            args.extend(["--clamav-database-url", clamav_database_url])
+        except (NoSectionError, NoOptionError):
+            pass
+        try:
+            snap_store_proxy_url = self._builder._config.get(
+                "proxy", "snapstore"
+            )
+            args.extend(["--snap-store-proxy-url", snap_store_proxy_url])
+        except (NoSectionError, NoOptionError):
+            pass
+        self.runTargetSubProcess("run-ci-prepare", *args)
+
+    @property
+    def current_job(self):
+        return self.jobs[self.stage_index][self.job_index]
+
+    @property
+    def has_current_job(self):
+        try:
+            self.current_job
+            return True
+        except IndexError:
+            return False
+
+    def iterate_PREPARE(self, retcode):
+        """Finished preparing for running CI jobs."""
+        self.stage_index = 0
+        self.job_index = 0
+        if retcode == RETCODE_SUCCESS:
+            pass
+        elif (
+            retcode >= RETCODE_FAILURE_INSTALL
+            and retcode <= RETCODE_FAILURE_BUILD
+        ):
+            if not self.alreadyfailed:
+                self._builder.log("Preparation failed.")
+                self._builder.buildFail()
+            self.alreadyfailed = True
+        else:
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+            self.alreadyfailed = True
+        if self.has_current_job and not self.alreadyfailed:
+            self._state = CIBuildState.RUN_JOB
+            self.runNextJob()
+        else:
+            self.stopProxy()
+            self.revokeProxyToken()
+            self.doReapProcesses(self._state)
+
+    def iterateReap_PREPARE(self, retcode):
+        """Finished reaping after preparing for running CI jobs.
+
+        This only happens if preparation failed or there were no jobs to run.
+        """
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    @staticmethod
+    def _makeJobID(job_name, job_index):
+        return f"{job_name}:{job_index}"
+
+    def runNextJob(self):
+        """Run the next CI job."""
+        args = list(self.proxy_args)
+        if self.package_repositories is not None:
+            for repository in self.package_repositories:
+                args.extend(["--package-repository", repository])
+        if self.environment_variables is not None:
+            for key, value in self.environment_variables.items():
+                args.extend(["--environment-variable", f"{key}={value}"])
+        if self.plugin_settings is not None:
+            for key, value in self.plugin_settings.items():
+                args.extend(["--plugin-setting", f"{key}={value}"])
+        if self.secrets is not None:
+            text = yaml.dump(self.secrets)
+            with self.backend.open(
+                "/build/.launchpad-secrets.yaml", mode="w"
+            ) as f:
+                f.write(text)
+            args.extend(["--secrets", "/build/.launchpad-secrets.yaml"])
+        if self.scan_malware:
+            args.append("--scan-malware")
+
+        job_name, job_index = self.current_job
+        self.current_job_id = _make_job_id(job_name, job_index)
+        args.extend([job_name, str(job_index)])
+        self.runTargetSubProcess("run-ci", *args)
+
+    @defer.inlineCallbacks
+    def iterate_RUN_JOB(self, retcode):
+        """Finished running a CI job.
+
+        This state is repeated for each CI job in the pipeline.
+        """
+        if retcode == RETCODE_SUCCESS:
+            result = RESULT_SUCCEEDED
+        else:
+            result = RESULT_FAILED
+            if (
+                retcode >= RETCODE_FAILURE_INSTALL
+                and retcode <= RETCODE_FAILURE_BUILD
+            ):
+                self._builder.log("Job %s failed." % self.current_job_id)
+                if len(self.jobs[self.stage_index]) == 1:
+                    # Single-job stage, so fail straight away in order to
+                    # get simpler error messages.
+                    if not self.alreadyfailed:
+                        self._builder.buildFail()
+                    self.alreadyfailed = True
+            else:
+                if not self.alreadyfailed:
+                    self._builder.builderFail()
+                self.alreadyfailed = True
+        yield self.deferGatherResults(reap=False)
+        self.job_status[self.current_job_id]["result"] = result
+
+        self.job_index += 1
+        if self.job_index >= len(self.jobs[self.stage_index]):
+            # End of stage.  Fail if any job in this stage has failed.
+            current_stage_job_ids = [
+                _make_job_id(job_name, job_index)
+                for job_name, job_index in self.jobs[self.stage_index]
+            ]
+            if any(
+                self.job_status[job_id]["result"] != RESULT_SUCCEEDED
+                for job_id in current_stage_job_ids
+            ):
+                if not self.alreadyfailed:
+                    self._builder.log(
+                        "Some jobs in %s failed; stopping."
+                        % current_stage_job_ids
+                    )
+                    self._builder.buildFail()
+                self.alreadyfailed = True
+            self.stage_index += 1
+            self.job_index = 0
+
+        if self.has_current_job and not self.alreadyfailed:
+            self.runNextJob()
+        else:
+            self.stopProxy()
+            self.revokeProxyToken()
+            self.doReapProcesses(self._state)
+
+    def iterateReap_RUN_JOB(self, retcode):
+        """Finished reaping after running a CI job.
+
+        This only happens if the job failed or there were no more jobs to run.
+        """
+        self.iterateReap_PREPARE(retcode)
+
+    def status(self):
+        """See `BuildManager.status`."""
+        status = super().status()
+        status["jobs"] = dict(self.job_status)
+        return status
+
+    def gatherResults(self):
+        """Gather the results of the CI job that just completed.
+
+        This is called once for each CI job in the pipeline.
+        """
+        job_status = {}
+        job_name, job_index = self.current_job
+        job_output_path = os.path.join(
+            "/build", "output", job_name, str(job_index)
+        )
+        for item_name in ("log", "properties"):
+            item_path = os.path.join(job_output_path, item_name)
+            if self.backend.path_exists(item_path):
+                item_id = f"{self.current_job_id}.{item_name}"
+                self.addWaitingFileFromBackend(item_path, name=item_id)
+                job_status[item_name] = self._builder.waitingfiles[item_id]
+        files_path = os.path.join(job_output_path, "files")
+        if self.backend.path_exists(files_path):
+            for entry in sorted(
+                self.backend.find(files_path, include_directories=False)
+            ):
+                path = os.path.join(files_path, entry)
+                if self.backend.islink(path):
+                    continue
+                entry_base = os.path.basename(entry)
+                name = os.path.join(self.current_job_id, entry_base)
+                self.addWaitingFileFromBackend(path, name=name)
+                job_status.setdefault("output", {})[entry_base] = (
+                    self._builder.waitingfiles[name]
+                )
+
+        # Save a file map for this job in the extra status file.  This
+        # allows buildd-manager to fetch job logs/output incrementally
+        # rather than having to wait for the entire CI job to finish.
+        self.job_status[self.current_job_id] = job_status
diff --git a/lpbuildd/debian.py b/lpbuildd/debian.py
new file mode 100644
index 0000000..5e52884
--- /dev/null
+++ b/lpbuildd/debian.py
@@ -0,0 +1,351 @@
+# Copyright 2009-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+# Authors: Daniel Silverstone <daniel.silverstone@xxxxxxxxxxxxx>
+#      and Adam Conrad <adam.conrad@xxxxxxxxxxxxx>
+
+import base64
+import os
+import re
+import signal
+from configparser import NoOptionError, NoSectionError
+
+from twisted.internet import defer, threads
+from twisted.python import log
+
+from lpbuildd.builder import BuildManager, get_build_path
+
+
+class DebianBuildState:
+    """States for the DebianBuildManager."""
+
+    INIT = "INIT"
+    UNPACK = "UNPACK"
+    MOUNT = "MOUNT"
+    SOURCES = "SOURCES"
+    KEYS = "KEYS"
+    UPDATE = "UPDATE"
+    UMOUNT = "UMOUNT"
+    CLEANUP = "CLEANUP"
+
+
+class DebianBuildManager(BuildManager):
+    """Base behaviour for Debian chrooted builds."""
+
+    def __init__(self, builder, buildid, **kwargs):
+        BuildManager.__init__(self, builder, buildid, **kwargs)
+        self._cachepath = builder._config.get("builder", "filecache")
+        self._state = DebianBuildState.INIT
+        builder.emptyLog()
+        self.alreadyfailed = False
+        self._iterator = None
+
+    @property
+    def initial_build_state(self):
+        raise NotImplementedError()
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot."""
+        self.sources_list = extra_args.get("archives")
+        self.trusted_keys = extra_args.get("trusted_keys")
+
+        BuildManager.initiate(self, files, chroot, extra_args)
+
+    def doSourcesList(self):
+        """Override apt/sources.list.
+
+        Mainly used for PPA builds.
+        """
+        args = []
+        try:
+            apt_proxy_url = self._builder._config.get("proxy", "apt")
+            args.extend(["--apt-proxy-url", apt_proxy_url])
+        except (NoSectionError, NoOptionError):
+            pass
+        args.extend(self.sources_list)
+        self.runTargetSubProcess("override-sources-list", *args)
+
+    def doTrustedKeys(self):
+        """Add trusted keys."""
+        trusted_keys = b"".join(
+            base64.b64decode(key) for key in self.trusted_keys
+        )
+        self.runTargetSubProcess("add-trusted-keys", stdin=trusted_keys)
+
+    def doUpdateChroot(self):
+        """Perform the chroot upgrade."""
+        self.runTargetSubProcess("update-debian-chroot")
+
+    def doRunBuild(self):
+        """Run the main build process.
+
+        Subclasses must override this.
+        """
+        raise NotImplementedError()
+
+    @staticmethod
+    def _parseChangesFile(linesIter):
+        """A generator that iterates over files listed in a changes file.
+
+        :param linesIter: an iterable of lines in a changes file.
+        """
+        seenfiles = False
+        for line in linesIter:
+            if line.endswith("\n"):
+                line = line[:-1]
+            if not seenfiles and line.startswith("Files:"):
+                seenfiles = True
+            elif seenfiles:
+                if not line.startswith(" "):
+                    break
+                filename = line.split(" ")[-1]
+                yield filename
+
+    def getChangesFilename(self):
+        changes = self._dscfile[:-4] + "_" + self.arch_tag + ".changes"
+        return get_build_path(self.home, self._buildid, changes)
+
+    def gatherResults(self):
+        """Gather the results of the build and add them to the file cache.
+
+        The primary file we care about is the .changes file. We key from there.
+        """
+        path = self.getChangesFilename()
+        self._builder.addWaitingFile(path)
+
+        with open(path, errors="replace") as chfile:
+            for fn in self._parseChangesFile(chfile):
+                self._builder.addWaitingFile(
+                    get_build_path(self.home, self._buildid, fn)
+                )
+
+    def deferGatherResults(self, reap=True):
+        """Gather the results of the build in a thread."""
+
+        # XXX cjwatson 2018-10-04: Refactor using inlineCallbacks once we're
+        # on Twisted >= 18.7.0 (https://twistedmatrix.com/trac/ticket/4632).
+        def failed_to_gather(failure):
+            if failure.check(defer.CancelledError):
+                if not self.alreadyfailed:
+                    self._builder.log("Build cancelled unexpectedly!\n")
+                    self._builder.buildFail()
+            else:
+                self._builder.log(
+                    "Failed to gather results: %s\n" % failure.value
+                )
+                self._builder.buildFail()
+            self.alreadyfailed = True
+
+        def reap_processes(ignored):
+            self.doReapProcesses(self._state)
+
+        d = threads.deferToThread(self.gatherResults).addErrback(
+            failed_to_gather
+        )
+        if reap:
+            d.addCallback(reap_processes)
+        return d
+
+    @defer.inlineCallbacks
+    def iterate(self, success, quiet=False):
+        # When a Twisted ProcessControl class is killed by SIGTERM,
+        # which we call 'build process aborted', 'None' is returned as
+        # exit_code.
+        if self.alreadyfailed and success == 0:
+            # We may have been aborted in between subprocesses; pretend that
+            # we were terminated by a signal, which is close enough.
+            success = 128 + signal.SIGKILL
+        if not quiet:
+            log.msg(
+                "Iterating with success flag %s against stage %s"
+                % (success, self._state)
+            )
+        func = getattr(self, "iterate_" + self._state, None)
+        if func is None:
+            raise ValueError("Unknown internal state " + self._state)
+        self._iterator = func(success)
+        yield self._iterator
+        self._iterator = None
+
+    def iterateReap(self, state, success):
+        log.msg(
+            "Iterating with success flag %s against stage %s after "
+            "reaping processes" % (success, state)
+        )
+        func = getattr(self, "iterateReap_" + state, None)
+        if func is None:
+            raise ValueError("Unknown internal post-reap state " + state)
+        func(success)
+
+    def iterate_INIT(self, success):
+        """Just finished initializing the build."""
+        if success != 0:
+            if not self.alreadyfailed:
+                # The init failed, can't fathom why that would be...
+                self._builder.builderFail()
+                self.alreadyfailed = True
+            self._state = DebianBuildState.CLEANUP
+            self.doCleanup()
+        else:
+            self._state = DebianBuildState.UNPACK
+            self.doUnpack()
+
+    def iterate_UNPACK(self, success):
+        """Just finished unpacking the tarball."""
+        if success != 0:
+            if not self.alreadyfailed:
+                # The unpack failed for some reason...
+                self._builder.chrootFail()
+                self.alreadyfailed = True
+            self._state = DebianBuildState.CLEANUP
+            self.doCleanup()
+        else:
+            self._state = DebianBuildState.MOUNT
+            self.doMounting()
+
+    def iterate_MOUNT(self, success):
+        """Just finished doing the mounts."""
+        if success != 0:
+            if not self.alreadyfailed:
+                self._builder.chrootFail()
+                self.alreadyfailed = True
+            self._state = DebianBuildState.UMOUNT
+            self.doUnmounting()
+        else:
+            if self.sources_list is not None:
+                self._state = DebianBuildState.SOURCES
+                self.doSourcesList()
+            elif self.trusted_keys:
+                self._state = DebianBuildState.KEYS
+                self.doTrustedKeys()
+            else:
+                self._state = DebianBuildState.UPDATE
+                self.doUpdateChroot()
+
+    def searchLogContents(
+        self, patterns_and_flags, stop_patterns_and_flags=[]
+    ):
+        """Search for any of a list of regex patterns in the build log.
+
+        The build log is matched using a sliding window, which avoids having
+        to read the whole file into memory at once but requires that matches
+        be no longer than the chunk size (currently 256KiB).
+
+        If any of the stop patterns are matched, the search stops
+        immediately without reading the rest of the file.
+
+        :return: A tuple of the regex pattern that matched and the match
+            object, or (None, None).
+        """
+        chunk_size = 256 * 1024
+        regexes = [
+            re.compile(pattern.encode("UTF-8"), flags)
+            for pattern, flags in patterns_and_flags
+        ]
+        stop_regexes = [
+            re.compile(pattern.encode("UTF-8"), flags)
+            for pattern, flags in stop_patterns_and_flags
+        ]
+        buildlog_path = os.path.join(self._cachepath, "buildlog")
+        with open(buildlog_path, "rb") as buildlog:
+            window = b""
+            chunk = buildlog.read(chunk_size)
+            while chunk:
+                window += chunk
+                for regex in regexes:
+                    match = regex.search(window)
+                    if match is not None:
+                        return regex.pattern.decode("UTF-8"), match
+                for regex in stop_regexes:
+                    if regex.search(window) is not None:
+                        return None, None
+                if len(window) > chunk_size:
+                    window = window[chunk_size:]
+                chunk = buildlog.read(chunk_size)
+        return None, None
+
+    def iterate_SOURCES(self, success):
+        """Just finished overwriting sources.list."""
+        if success != 0:
+            if not self.alreadyfailed:
+                self._builder.chrootFail()
+                self.alreadyfailed = True
+            self.doReapProcesses(self._state)
+        elif self.trusted_keys:
+            self._state = DebianBuildState.KEYS
+            self.doTrustedKeys()
+        else:
+            self._state = DebianBuildState.UPDATE
+            self.doUpdateChroot()
+
+    def iterateReap_SOURCES(self, success):
+        """Just finished reaping after failure to overwrite sources.list."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def iterate_KEYS(self, success):
+        """Just finished adding trusted keys."""
+        if success != 0:
+            if not self.alreadyfailed:
+                self._builder.chrootFail()
+                self.alreadyfailed = True
+            self.doReapProcesses(self._state)
+        else:
+            self._state = DebianBuildState.UPDATE
+            self.doUpdateChroot()
+
+    def iterateReap_KEYS(self, success):
+        """Just finished reaping after failure to add trusted keys."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def iterate_UPDATE(self, success):
+        """Just finished updating the chroot."""
+        if success != 0:
+            if not self.alreadyfailed:
+                self._builder.chrootFail()
+                self.alreadyfailed = True
+            self.doReapProcesses(self._state)
+        else:
+            self._state = self.initial_build_state
+            self.doRunBuild()
+
+    def iterateReap_UPDATE(self, success):
+        """Just finished reaping after failure to update the chroot."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def iterate_UMOUNT(self, success):
+        """Just finished doing the unmounting."""
+        if success != 0:
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+                self.alreadyfailed = True
+        self._state = DebianBuildState.CLEANUP
+        self.doCleanup()
+
+    def iterate_CLEANUP(self, success):
+        """Just finished the cleanup."""
+        if success != 0:
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+                self.alreadyfailed = True
+        else:
+            # Successful clean
+            if not self.alreadyfailed:
+                self._builder.buildOK()
+        self._builder.buildComplete()
+
+    def abortReap(self):
+        """Abort by killing all processes in the chroot, as hard as we can.
+
+        Overridden here to handle state management.
+        """
+        self.doReapProcesses(self._state, notify=False)
+
+    def abort(self):
+        """See `BuildManager`."""
+        super().abort()
+        if self._iterator is not None:
+            self._iterator.cancel()
+            self._iterator = None
diff --git a/lpbuildd/livefs.py b/lpbuildd/livefs.py
new file mode 100644
index 0000000..6e8da92
--- /dev/null
+++ b/lpbuildd/livefs.py
@@ -0,0 +1,112 @@
+# Copyright 2013-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os
+from configparser import NoOptionError, NoSectionError
+
+from lpbuildd.debian import DebianBuildManager, DebianBuildState
+
+RETCODE_SUCCESS = 0
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+class LiveFilesystemBuildState(DebianBuildState):
+    BUILD_LIVEFS = "BUILD_LIVEFS"
+
+
+class LiveFilesystemBuildManager(DebianBuildManager):
+    """Build a live filesystem."""
+
+    backend_name = "lxd"
+    initial_build_state = LiveFilesystemBuildState.BUILD_LIVEFS
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot."""
+        self.subarch = extra_args.get("subarch")
+        self.project = extra_args["project"]
+        self.subproject = extra_args.get("subproject")
+        self.pocket = extra_args["pocket"]
+        self.datestamp = extra_args.get("datestamp")
+        self.image_format = extra_args.get("image_format")
+        self.locale = extra_args.get("locale")
+        self.extra_ppas = extra_args.get("extra_ppas", [])
+        self.extra_snaps = extra_args.get("extra_snaps", [])
+        self.channel = extra_args.get("channel")
+        self.image_targets = extra_args.get("image_targets", [])
+        self.repo_snapshot_stamp = extra_args.get("repo_snapshot_stamp")
+        self.cohort_key = extra_args.get("cohort-key")
+        self.debug = extra_args.get("debug", False)
+
+        super().initiate(files, chroot, extra_args)
+
+    def doRunBuild(self):
+        """Run the process to build the live filesystem."""
+        args = []
+        if self.subarch:
+            args.extend(["--subarch", self.subarch])
+        args.extend(["--project", self.project])
+        if self.subproject:
+            args.extend(["--subproject", self.subproject])
+        if self.datestamp:
+            args.extend(["--datestamp", self.datestamp])
+        if self.image_format:
+            args.extend(["--image-format", self.image_format])
+        if self.pocket == "proposed":
+            args.append("--proposed")
+        if self.locale:
+            args.extend(["--locale", self.locale])
+        for ppa in self.extra_ppas:
+            args.extend(["--extra-ppa", ppa])
+        for snap in self.extra_snaps:
+            args.extend(["--extra-snap", snap])
+        if self.channel:
+            args.extend(["--channel", self.channel])
+        for image_target in self.image_targets:
+            args.extend(["--image-target", image_target])
+        if self.repo_snapshot_stamp:
+            args.extend(["--repo-snapshot-stamp", self.repo_snapshot_stamp])
+        if self.cohort_key:
+            args.extend(["--cohort-key", self.cohort_key])
+        try:
+            snap_store_proxy_url = self._builder._config.get(
+                "proxy", "snapstore"
+            )
+            args.extend(["--snap-store-proxy-url", snap_store_proxy_url])
+        except (NoSectionError, NoOptionError):
+            pass
+        if self.debug:
+            args.append("--debug")
+        self.runTargetSubProcess("buildlivefs", *args)
+
+    def iterate_BUILD_LIVEFS(self, retcode):
+        """Finished building the live filesystem."""
+        if retcode == RETCODE_SUCCESS:
+            print("Returning build status: OK")
+            return self.deferGatherResults()
+        elif (
+            retcode >= RETCODE_FAILURE_INSTALL
+            and retcode <= RETCODE_FAILURE_BUILD
+        ):
+            if not self.alreadyfailed:
+                self._builder.buildFail()
+                print("Returning build status: Build failed.")
+            self.alreadyfailed = True
+        else:
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+                print("Returning build status: Builder failed.")
+            self.alreadyfailed = True
+        self.doReapProcesses(self._state)
+
+    def iterateReap_BUILD_LIVEFS(self, retcode):
+        """Finished reaping after building the live filesystem."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def gatherResults(self):
+        """Gather the results of the build and add them to the file cache."""
+        for entry in sorted(self.backend.listdir("/build")):
+            path = os.path.join("/build", entry)
+            if entry.startswith("livecd.") and not self.backend.islink(path):
+                self.addWaitingFileFromBackend(path)
diff --git a/lpbuildd/log.py b/lpbuildd/log.py
new file mode 100644
index 0000000..81337f9
--- /dev/null
+++ b/lpbuildd/log.py
@@ -0,0 +1,41 @@
+# Copyright 2009-2015 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import signal
+import sys
+
+from twisted.internet import reactor
+from twisted.python import log, logfile
+from zope.interface import implementer
+
+
+@implementer(log.ILogObserver)
+class RotatableFileLogObserver:
+    """A log observer that uses a log file and reopens it on SIGHUP."""
+
+    def __init__(self, logfilepath):
+        """Set up the logfile and possible signal handler.
+
+        Installs the signal handler for SIGHUP to make the process re-open
+        the log file.
+
+        :param logfilepath: The path to the logfile. If None, stdout is used
+            for logging and no signal handler will be installed.
+        """
+        if logfilepath is None:
+            logFile = sys.stdout
+        else:
+            logFile = logfile.LogFile.fromFullPath(
+                logfilepath, rotateLength=None
+            )
+            # Override if signal is set to None or SIG_DFL (0)
+            if not signal.getsignal(signal.SIGHUP):
+
+                def signalHandler(signal, frame):
+                    reactor.callFromThread(logFile.reopen)
+
+                signal.signal(signal.SIGHUP, signalHandler)
+        self.observer = log.FileLogObserver(logFile)
+
+    def __call__(self, eventDict):
+        self.observer.emit(eventDict)
diff --git a/lpbuildd/oci.py b/lpbuildd/oci.py
new file mode 100644
index 0000000..5ce2cd8
--- /dev/null
+++ b/lpbuildd/oci.py
@@ -0,0 +1,283 @@
+# Copyright 2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import gzip
+import hashlib
+import json
+import os
+import shutil
+import tarfile
+import tempfile
+from configparser import NoOptionError, NoSectionError
+
+from lpbuildd.debian import DebianBuildManager, DebianBuildState
+from lpbuildd.proxy import BuildManagerProxyMixin
+
+RETCODE_SUCCESS = 0
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+class OCIBuildState(DebianBuildState):
+    BUILD_OCI = "BUILD_OCI"
+
+
+class OCIBuildManager(BuildManagerProxyMixin, DebianBuildManager):
+    """Build an OCI Image."""
+
+    backend_name = "lxd"
+    initial_build_state = OCIBuildState.BUILD_OCI
+
+    @property
+    def needs_sanitized_logs(self):
+        return True
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot."""
+        self.name = extra_args["name"]
+        self.branch = extra_args.get("branch")
+        self.git_repository = extra_args.get("git_repository")
+        self.git_path = extra_args.get("git_path")
+        self.build_file = extra_args.get("build_file")
+        self.build_args = extra_args.get("build_args", {})
+        self.build_path = extra_args.get("build_path")
+        self.proxy_url = extra_args.get("proxy_url")
+        self.revocation_endpoint = extra_args.get("revocation_endpoint")
+        self.proxy_service = None
+
+        super().initiate(files, chroot, extra_args)
+
+    def doRunBuild(self):
+        """Run the process to build the OCI image."""
+        args = []
+        args.extend(self.startProxy())
+        if self.revocation_endpoint:
+            args.extend(["--revocation-endpoint", self.revocation_endpoint])
+        if self.branch is not None:
+            args.extend(["--branch", self.branch])
+        if self.git_repository is not None:
+            args.extend(["--git-repository", self.git_repository])
+        if self.git_path is not None:
+            args.extend(["--git-path", self.git_path])
+        if self.build_file is not None:
+            args.extend(["--build-file", self.build_file])
+        if self.build_args:
+            for k, v in self.build_args.items():
+                args.extend(["--build-arg", f"{k}={v}"])
+        if self.build_path is not None:
+            args.extend(["--build-path", self.build_path])
+        try:
+            snap_store_proxy_url = self._builder._config.get(
+                "proxy", "snapstore"
+            )
+            args.extend(["--snap-store-proxy-url", snap_store_proxy_url])
+        except (NoSectionError, NoOptionError):
+            pass
+        args.append(self.name)
+        self.runTargetSubProcess("build-oci", *args)
+
+    def iterate_BUILD_OCI(self, retcode):
+        """Finished building the OCI image."""
+        self.stopProxy()
+        self.revokeProxyToken()
+        if retcode == RETCODE_SUCCESS:
+            print("Returning build status: OK")
+            return self.deferGatherResults()
+        elif (
+            retcode >= RETCODE_FAILURE_INSTALL
+            and retcode <= RETCODE_FAILURE_BUILD
+        ):
+            if not self.alreadyfailed:
+                self._builder.buildFail()
+                print("Returning build status: Build failed.")
+            self.alreadyfailed = True
+        else:
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+                print("Returning build status: Builder failed.")
+            self.alreadyfailed = True
+        self.doReapProcesses(self._state)
+
+    def iterateReap_BUILD_OCI(self, retcode):
+        """Finished reaping after building the OCI image."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def _calculateLayerSha(self, layer_path):
+        with open(layer_path, "rb") as layer_tar:
+            sha256_hash = hashlib.sha256()
+            for byte_block in iter(lambda: layer_tar.read(4096), b""):
+                sha256_hash.update(byte_block)
+            digest = sha256_hash.hexdigest()
+            return digest
+
+    def _gatherManifestSection(self, section, extract_path, sha_directory):
+        config_file_path = os.path.join(extract_path, section["Config"])
+        self._builder.addWaitingFile(config_file_path)
+        with open(config_file_path) as config_fp:
+            config = json.load(config_fp)
+        diff_ids = config["rootfs"]["diff_ids"]
+        digest_diff_map = {}
+        for diff_id, layer_id in zip(diff_ids, section["Layers"]):
+            layer_id = layer_id.split("/")[0]
+            diff_file = os.path.join(sha_directory, diff_id.split(":")[1])
+            layer_path = os.path.join(extract_path, f"{layer_id}.tar.gz")
+            self._builder.addWaitingFile(layer_path)
+            # If we have a mapping between diff and existing digest,
+            # this means this layer has been pulled from a remote.
+            # We should maintain the same digest to achieve layer reuse
+            if os.path.exists(diff_file):
+                with open(diff_file) as diff_fp:
+                    diff = json.load(diff_fp)
+                    # We should be able to just take the first occurrence,
+                    # as that will be the 'most parent' image
+                    digest = diff[0]["Digest"]
+                    source = diff[0]["SourceRepository"]
+            # If the layer has been built locally, we need to generate the
+            # digest and then set the source to empty
+            else:
+                source = ""
+                digest = self._calculateLayerSha(layer_path)
+            digest_diff_map[diff_id] = {
+                "digest": digest,
+                "source": source,
+                "layer_id": layer_id,
+            }
+
+        return digest_diff_map
+
+    def gatherResults(self):
+        """Gather the results of the build and add them to the file cache."""
+        extract_path = tempfile.mkdtemp(prefix=self.name)
+        try:
+            proc = self.backend.run(
+                ["docker", "save", self.name],
+                get_output=True,
+                return_process=True,
+            )
+            tar = tarfile.open(fileobj=proc.stdout, mode="r|")
+        except Exception as e:
+            self._builder.log(f"Unable to save image: {e}")
+            raise
+
+        current_dir = ""
+        gzip_layer = None
+        symlinks = []
+        try:
+            # The tarfile is a stream and must be processed in order
+            for file in tar:
+                self._builder.log(f"Processing tar file: {file.name}")
+                # Directories are just nodes, you can't extract the children
+                # directly, so keep track of what dir we're in.
+                if file.isdir():
+                    current_dir = file.name
+                    if gzip_layer:
+                        # Close the old directory if we have one
+                        gzip_layer.close()
+                if file.issym():
+                    # symlinks can't be extracted or dereferenced from a stream
+                    # as you can't seek backwards.
+                    # Work out what the symlink is referring to, then
+                    # we can deal with it later
+                    self._builder.log(
+                        f"Found symlink at {file.name} referencing "
+                        f"{file.linkpath}"
+                    )
+                    symlinks.append(file)
+                    continue
+                if current_dir and file.name.endswith("layer.tar"):
+                    # This is the actual layer data.
+                    # Instead of adding the layer.tar to a gzip directory
+                    # we add the contents of untarred layer.tar to a gzip.
+                    # Now instead of having a gz directory in the form:
+                    # directory.tar.gz/layer.tar/contents
+                    # we will have: layer.tar.gz/contents. This final gz format
+                    # will have to have the name of the directory
+                    # (directory_name.tar.gz/contents) otherwise we will end up
+                    # with multiple gzips with the same name "layer.tar.gz".
+                    fileobj = tar.extractfile(file)
+                    name = os.path.join(extract_path, f"{current_dir}.tar.gz")
+                    with gzip.GzipFile(name, "wb") as gzip_layer:
+                        byte = fileobj.read(1)
+                        while len(byte) > 0:
+                            gzip_layer.write(byte)
+                            byte = fileobj.read(1)
+                elif current_dir and file.name.startswith(current_dir):
+                    # Other files that are in the layer directories,
+                    # we don't care about
+                    continue
+                else:
+                    # If it's not in a directory, we need that
+                    tar.extract(file, extract_path)
+        except Exception as e:
+            self._builder.log(f"Tar file processing failed: {e}")
+            raise
+        finally:
+            if gzip_layer is not None:
+                gzip_layer.close()
+            fileobj.close()
+
+        # deal with any symlinks we had
+        for symlink in symlinks:
+            # These are paths that finish in "<layer_id>/layer.tar"
+            # we want the directory name, which should always be
+            # the second component
+            source_name = os.path.join(
+                extract_path, f"{symlink.linkpath.split('/')[-2]}.tar.gz"
+            )
+            target_name = os.path.join(
+                extract_path, f"{symlink.name.split('/')[-2]}.tar.gz"
+            )
+            # Do a copy to dereference the symlink
+            self._builder.log(
+                f"Dereferencing symlink from {source_name} to {target_name}"
+            )
+            shutil.copy(source_name, target_name)
+
+        # We need these mapping files
+        sha_directory = tempfile.mkdtemp()
+        # This can change depending on the kernel options / docker package
+        # used. This is correct for bionic buildd image
+        # with apt installed docker.
+        sha_path = (
+            "/var/lib/docker/image/"
+            "vfs/distribution/v2metadata-by-diffid/sha256"
+        )
+        # If there have been no images pulled in the build process
+        # (FROM scratch), then this directory will not exist and
+        # we will have no contents from it.
+        if self.backend.path_exists(sha_path):
+            sha_files = [
+                x
+                for x in self.backend.listdir(sha_path)
+                if not x.startswith(".")
+            ]
+            for file in sha_files:
+                self.backend.copy_out(
+                    os.path.join(sha_path, file),
+                    os.path.join(sha_directory, file),
+                )
+        else:
+            self._builder.log(f"No metadata directory at {sha_path}")
+
+        # Parse the manifest for the other files we need
+        manifest_path = os.path.join(extract_path, "manifest.json")
+        self._builder.addWaitingFile(manifest_path)
+        with open(manifest_path) as manifest_fp:
+            manifest = json.load(manifest_fp)
+
+        digest_maps = []
+        try:
+            for section in manifest:
+                digest_maps.append(
+                    self._gatherManifestSection(
+                        section, extract_path, sha_directory
+                    )
+                )
+            digest_map_file = os.path.join(extract_path, "digests.json")
+            with open(digest_map_file, "w") as digest_map_fp:
+                json.dump(digest_maps, digest_map_fp)
+            self._builder.addWaitingFile(digest_map_file)
+        except Exception as e:
+            self._builder.log(f"Failed to parse manifest: {e}")
+            raise
diff --git a/lpbuildd/pottery/__init__.py b/lpbuildd/pottery/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lpbuildd/pottery/__init__.py
diff --git a/lpbuildd/pottery/intltool.py b/lpbuildd/pottery/intltool.py
new file mode 100644
index 0000000..59ec2da
--- /dev/null
+++ b/lpbuildd/pottery/intltool.py
@@ -0,0 +1,346 @@
+# Copyright 2009-2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+"""Functions to build PO templates on the builder."""
+
+__all__ = [
+    "check_potfiles_in",
+    "generate_pot",
+    "generate_pots",
+    "get_translation_domain",
+    "find_intltool_dirs",
+    "find_potfiles_in",
+]
+
+import os.path
+import re
+import subprocess
+import tempfile
+
+
def find_potfiles_in(backend, package_dir):
    """Locate every directory under `package_dir` with a POTFILES.in.

    :param backend: The `Backend` where work is done.
    :param package_dir: The directory to search.
    :returns: A list of names of directories that contain a file
        POTFILES.in, relative to `package_dir`.
    """
    matches = backend.find(
        package_dir, include_directories=False, name="POTFILES.in"
    )
    result = []
    for match in matches:
        result.append(os.path.dirname(match))
    return result
+
+
def check_potfiles_in(backend, path):
    """Check if the files listed in the POTFILES.in file exist.

    Running 'intltool-update -m' will perform this check and also take a
    possible POTFILES.skip into account. It stores details about 'missing'
    (files that should be in POTFILES.in) and 'notexist'ing files (files
    that are listed in POTFILES.in but don't exist) in files which are
    named accordingly. These files are removed before the run.

    We don't care about files missing from POTFILES.in but want to know if
    all listed files exist. The presence of the 'notexist' file tells us
    that.

    :param backend: The `Backend` where work is done.
    :param path: The directory where POTFILES.in resides.
    :returns: False if the directory does not exist, if an error occurred
        when executing intltool-update or if files are missing from
        POTFILES.in. True if all went fine and all files in POTFILES.in
        actually exist.
    """
    # Abort nicely if the directory does not exist.
    if not backend.isdir(path):
        return False
    # Remove stale status files from a previous run of intltool-update -m;
    # their mere presence would skew the check below.
    backend.run(
        ["rm", "-f"]
        + [os.path.join(path, name) for name in ("missing", "notexist")]
    )
    # os.devnull instead of a hard-coded "/dev/null" path.
    with open(os.devnull, "w") as devnull:
        try:
            backend.run(
                ["/usr/bin/intltool-update", "-m"],
                stdout=devnull,
                stderr=devnull,
                cwd=path,
            )
        except subprocess.CalledProcessError:
            # intltool-update itself failed; treat the setup as broken.
            return False

    return not backend.path_exists(os.path.join(path, "notexist"))
+
+
def find_intltool_dirs(backend, package_dir):
    """Search for directories with intltool structure.

    `package_dir` and its subdirectories are searched. An 'intltool
    structure' is a directory that contains a POTFILES.in file and where
    all files listed in that POTFILES.in do actually exist. The latter
    condition makes sure that the file is not stale.

    :param backend: The `Backend` where work is done.
    :param package_dir: The directory to search.
    :returns: A sorted list of directory names, relative to `package_dir`.
    """
    valid_dirs = []
    for podir in find_potfiles_in(backend, package_dir):
        if check_potfiles_in(backend, os.path.join(package_dir, podir)):
            valid_dirs.append(podir)
    valid_dirs.sort()
    return valid_dirs
+
+
+def _get_AC_PACKAGE_NAME(config_file):
+    """Get the value of AC_PACKAGE_NAME from function parameters.
+
+    The value of AC_PACKAGE_NAME is either the first or the fourth
+    parameter of the AC_INIT call if it is called with at least two
+    parameters.
+    """
+    params = config_file.getFunctionParams("AC_INIT")
+    if params is None or len(params) < 2:
+        return None
+    if len(params) < 4:
+        return params[0]
+    else:
+        return params[3]
+
+
+def _try_substitution(config_files, varname, substitution):
+    """Try to find a substitution in the config files.
+
+    :returns: The completed substitution or None if none was found.
+    """
+    subst_value = None
+    if varname == substitution.name:
+        # Do not look for the same name in the current file.
+        config_files = config_files[:-1]
+    for config_file in reversed(config_files):
+        subst_value = config_file.getVariable(substitution.name)
+        if subst_value is None and substitution.name == "PACKAGE":
+            subst_value = _get_AC_PACKAGE_NAME(config_file)
+        if subst_value is not None:
+            # Substitution found.
+            break
+    else:
+        # No substitution found.
+        return None
+    return substitution.replace(subst_value)
+
+
def get_translation_domain(backend, dirname):
    """Get the translation domain for this PO directory.

    Imitates some of the behavior of intltool-update to find out which
    translation domain the build environment provides. The domain is usually
    defined in the GETTEXT_PACKAGE variable in one of the build files. Another
    variant is DOMAIN in the Makevars file. This function goes through the
    ordered list of these possible locations, top to bottom, and tries to
    find a valid value. Since the same variable name may be defined in
    multiple files (usually configure.ac and Makefile.in.in), it needs to
    keep trying with the next file, until it finds the most specific
    definition.

    If the found value contains a substitution, either autoconf style (@...@)
    or make style ($(...)), the search is continued in the same file and back
    up the list of files, now searching for the substitution. Multiple
    substitutions or multi-level substitutions are not supported.
    """
    # Candidate locations in search order: (file relative to `dirname`,
    # variable name, whether later files may hold a more specific value).
    locations = [
        ("../configure.ac", "GETTEXT_PACKAGE", True),
        ("../configure.in", "GETTEXT_PACKAGE", True),
        ("Makefile.in.in", "GETTEXT_PACKAGE", False),
        ("Makevars", "DOMAIN", False),
    ]
    value = None
    substitution = None
    # Parsed files accumulate here so substitutions found in later files
    # can be resolved against earlier ones.
    config_files = []
    for filename, varname, keep_trying in locations:
        path = os.path.join(dirname, filename)
        if not backend.path_exists(path):
            # Skip non-existent files.
            continue
        # Copy the file out of the backend so ConfigFile can parse a
        # local copy; the temporary file is discarded afterwards.
        with tempfile.NamedTemporaryFile() as local_file:
            backend.copy_out(path, local_file.name)
            config_files.append(ConfigFile(local_file.file))
        new_value = config_files[-1].getVariable(varname)
        if new_value is not None:
            value = new_value
            if value == "AC_PACKAGE_NAME":
                # Special token: resolve via the AC_INIT parameters of
                # the file that defined it.
                value = _get_AC_PACKAGE_NAME(config_files[-1])
            else:
                # Check if the value needs a substitution.
                substitution = Substitution.get(value)
                if substitution is not None:
                    # Try to substitute with value.
                    value = _try_substitution(
                        config_files, varname, substitution
                    )
                    if value is None:
                        # No substitution found; the setup is broken.
                        break
        if value is not None and not keep_trying:
            # A value has been found.
            break
    return value
+
+
def generate_pot(backend, podir, domain):
    """Generate one PO template using intltool.

    Although 'intltool-update -p' can try to find out the translation domain
    we trust our own code more on this one and simply specify the domain.
    Also, the man page for 'intltool-update' states that the '-g' option
    "has an additional effect: the name of current working directory is no
    more  limited  to 'po' or 'po-*'." We don't want that limit either.

    :param backend: The `Backend` where work is done.
    :param podir: The PO directory in which to build template.
    :param domain: The translation domain to use as the name of the template.
      If it is None or empty, 'messages.pot' will be used.
    :return: The effective domain if generation succeeded, otherwise None.
    """
    # Fall back to the gettext default domain for missing or blank input.
    if domain is None or domain.strip() == "":
        domain = "messages"
    # os.devnull instead of a hard-coded "/dev/null" path.
    with open(os.devnull, "w") as devnull:
        try:
            backend.run(
                ["/usr/bin/intltool-update", "-p", "-g", domain],
                stdout=devnull,
                stderr=devnull,
                cwd=podir,
            )
            return domain
        except subprocess.CalledProcessError:
            # intltool-update failed; no template was produced.
            return None
+
+
def generate_pots(backend, package_dir):
    """Top-level function to generate all PO templates in a package.

    :param backend: The `Backend` where work is done.
    :param package_dir: The source package directory to process.
    :returns: Paths of the generated templates, relative to `package_dir`.
    """
    templates = []
    for podir in find_intltool_dirs(backend, package_dir):
        full_podir = os.path.join(package_dir, podir)
        effective = generate_pot(
            backend, full_podir, get_translation_domain(backend, full_podir)
        )
        if effective is not None:
            templates.append(os.path.join(podir, effective + ".pot"))
    return templates
+
+
class ConfigFile:
    """Represent a config file and return variables defined in it."""

    def __init__(self, file_or_name):
        # Accept either a filesystem path or an already-open file object.
        if isinstance(file_or_name, str):
            with open(file_or_name, "rb") as conf_file:
                raw = conf_file.read()
        else:
            raw = file_or_name.read()
        # Normalize to text; undecodable bytes are replaced rather than
        # allowed to raise.
        if isinstance(raw, bytes):
            raw = raw.decode("UTF-8", "replace")
        self.content = raw

    def _stripQuotes(self, identifier):
        """Strip surrounding quotes from `identifier`, if present.

        :param identifier: a string, possibly surrounded by a matching
            pair of single quotes, double quotes, or [brackets].
        :return: `identifier` but with the outer pair of matching quotes
            removed, if they were there.
        """
        if len(identifier) >= 2:
            for left, right in (('"', '"'), ("'", "'"), ("[", "]")):
                if identifier[0] == left and identifier[-1] == right:
                    return identifier[1:-1]
        return identifier

    def getVariable(self, name):
        """Search the file for a variable definition with this name."""
        match = re.search(
            r"^%s[ \t]*=[ \t]*([^\s]*)" % re.escape(name), self.content, re.M
        )
        if match is None:
            return None
        return self._stripQuotes(match.group(1))

    def getFunctionParams(self, name):
        """Search file for a function call with this name.

        Return its parameters.
        """
        match = re.search(
            r"^%s\(([^)]*)\)" % re.escape(name), self.content, re.M
        )
        if match is None:
            return None
        return [
            self._stripQuotes(param.strip())
            for param in match.group(1).split(",")
        ]
+
+
class Substitution:
    """Find and replace substitutions.

    Variable texts may contain other variables which should be substituted
    for their value. These are either marked by surrounding @ signs (autoconf
    style) or preceded by a $ sign with optional () (make style).

    This class identifies a single such substitution in a variable text and
    extracts the name of the variable whose value is to be inserted. It also
    facilitates the actual replacement so that the caller does not have to
    worry about the substitution style that is being used.
    """

    autoconf_pattern = re.compile(r"@([^@]+)@")
    makefile_pattern = re.compile(r"\$\(?([^\s\)]+)\)?")

    @staticmethod
    def get(variabletext):
        """Factory method.

        Creates a Substitution instance and checks if it found a substitution.

        :param variabletext: A variable value with possible substitution.
        :returns: A Substitution object or None if no substitution was found.
        """
        candidate = Substitution(variabletext)
        return candidate if candidate.name is not None else None

    def _searchForPatterns(self):
        """Search for all the available patterns in variable text."""
        # Autoconf style (@name@) is tried before make style ($(name)).
        match = self.autoconf_pattern.search(self.text)
        if match is None:
            match = self.makefile_pattern.search(self.text)
        return match

    def __init__(self, variabletext):
        """Extract substitution name from variable text."""
        self.text = variabletext
        self.replaced = False
        match = self._searchForPatterns()
        if match is not None:
            self._replacement = match.group(0)
            self.name = match.group(1)
        else:
            self._replacement = None
            self.name = None

    def replace(self, value):
        """Return copy of the variable text with the substitution resolved."""
        self.replaced = True
        return self.text.replace(self._replacement, value)
diff --git a/lpbuildd/pottery/tests/__init__.py b/lpbuildd/pottery/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lpbuildd/pottery/tests/__init__.py
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_POTFILES_in_1.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_POTFILES_in_1.tar.bz2
new file mode 100644
index 0000000..b436e56
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_POTFILES_in_1.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_POTFILES_in_2.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_POTFILES_in_2.tar.bz2
new file mode 100644
index 0000000..3ff224d
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_POTFILES_in_2.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_base.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_base.tar.bz2
new file mode 100644
index 0000000..e6858ac
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_base.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_ac.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_ac.tar.bz2
new file mode 100644
index 0000000..792655a
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_ac.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_in.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_in.tar.bz2
new file mode 100644
index 0000000..fef37a8
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_in.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_in_substitute_version.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_in_substitute_version.tar.bz2
new file mode 100644
index 0000000..b2f6e9a
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_configure_in_substitute_version.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in.tar.bz2
new file mode 100644
index 0000000..89332d7
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute.tar.bz2
new file mode 100644
index 0000000..59b1faa
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_broken.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_broken.tar.bz2
new file mode 100644
index 0000000..abd8929
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_broken.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_same_file.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_same_file.tar.bz2
new file mode 100644
index 0000000..79a6d65
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_same_file.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_same_name.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_same_name.tar.bz2
new file mode 100644
index 0000000..b3f57de
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makefile_in_in_substitute_same_name.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makevars.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makevars.tar.bz2
new file mode 100644
index 0000000..013e14c
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_domain_makevars.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_full_ok.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_full_ok.tar.bz2
new file mode 100644
index 0000000..f3f208c
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_full_ok.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/pottery_test_data/intltool_single_ok.tar.bz2 b/lpbuildd/pottery/tests/pottery_test_data/intltool_single_ok.tar.bz2
new file mode 100644
index 0000000..7c60d80
Binary files /dev/null and b/lpbuildd/pottery/tests/pottery_test_data/intltool_single_ok.tar.bz2 differ
diff --git a/lpbuildd/pottery/tests/test_intltool.py b/lpbuildd/pottery/tests/test_intltool.py
new file mode 100644
index 0000000..e51fb4e
--- /dev/null
+++ b/lpbuildd/pottery/tests/test_intltool.py
@@ -0,0 +1,629 @@
+# Copyright 2009-2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import errno
+import os
+import tarfile
+from io import StringIO
+from textwrap import dedent
+
+from fixtures import TempDir
+from testtools import TestCase
+from testtools.matchers import Equals, MatchesSetwise
+
+from lpbuildd.pottery.intltool import (
+    ConfigFile,
+    check_potfiles_in,
+    find_intltool_dirs,
+    find_potfiles_in,
+    generate_pot,
+    generate_pots,
+    get_translation_domain,
+)
+from lpbuildd.tests.fakebuilder import FakeMethod, UncontainedBackend
+
+
class SetupTestPackageMixin:
    """Mixin that unpacks fixture source packages into a temp directory."""

    # Directory (relative to this test module) holding the fixture tarballs.
    test_data_dir = "pottery_test_data"

    def prepare_package(self, packagename, buildfiles=None):
        """Unpack the specified package in a temporary directory.

        Return the package's directory.

        :param packagename: The name of the package to prepare.
        :param buildfiles: A dictionary of path:content describing files to
            add to the package.
        """
        # First build the path for the package tarball.
        tarpath = os.path.join(
            os.path.dirname(__file__),
            self.test_data_dir,
            packagename + ".tar.bz2",
        )
        # Unpack into a fresh temporary directory that the fixture
        # machinery cleans up automatically.
        parent = self.useFixture(TempDir()).path
        with tarfile.open(tarpath, "r|bz2") as tar:
            # NOTE(review): extractall is used on trusted fixture data
            # only; untrusted archives would need a member filter.
            tar.extractall(parent)
        package_dir = os.path.join(parent, packagename)

        if buildfiles is None:
            return package_dir

        # Overlay the requested extra files onto the unpacked package.
        for path, content in buildfiles.items():
            directory = os.path.dirname(path)
            if directory != "":
                # exist_ok=True replaces the Python-2-era errno.EEXIST
                # try/except dance.
                os.makedirs(
                    os.path.join(package_dir, directory), exist_ok=True
                )
            with open(os.path.join(package_dir, path), "w") as the_file:
                the_file.write(content)

        return package_dir
+
+
class TestDetectIntltool(TestCase, SetupTestPackageMixin):
    """Tests for detecting intltool structures in unpacked packages."""

    def test_detect_potfiles_in(self):
        # Of several directories in the package, exactly one contains a
        # POTFILES.in and should be the single hit.
        pkg_dir = self.prepare_package("intltool_POTFILES_in_1")
        found = find_potfiles_in(UncontainedBackend("1"), pkg_dir)
        self.assertThat(found, MatchesSetwise(Equals("po-intltool")))

    def test_detect_potfiles_in_module(self):
        # POTFILES.in files at different nesting levels are all found.
        pkg_dir = self.prepare_package("intltool_POTFILES_in_2")
        found = find_potfiles_in(UncontainedBackend("1"), pkg_dir)
        self.assertThat(
            found, MatchesSetwise(Equals("po"), Equals("module1/po"))
        )

    def test_check_potfiles_in_content_ok(self):
        # All files listed in POTFILES.in exist in the source package.
        pkg_dir = self.prepare_package("intltool_single_ok")
        self.assertTrue(
            check_potfiles_in(
                UncontainedBackend("1"), os.path.join(pkg_dir, "po")
            )
        )

    def test_check_potfiles_in_content_ok_file_added(self):
        # A source file that POTFILES.in does not mention is harmless.
        pkg_dir = self.prepare_package("intltool_single_ok")
        new_source = os.path.join(pkg_dir, "src/sourcefile_new.c")
        with open(new_source, "w") as new_file:
            new_file.write("/* Test file. */")
        self.assertTrue(
            check_potfiles_in(
                UncontainedBackend("1"), os.path.join(pkg_dir, "po")
            )
        )

    def test_check_potfiles_in_content_not_ok_file_removed(self):
        # A file listed in POTFILES.in but absent from the package marks
        # the intltool structure as broken and unusable for our purposes.
        pkg_dir = self.prepare_package("intltool_single_ok")
        os.remove(os.path.join(pkg_dir, "src/sourcefile1.c"))
        self.assertFalse(
            check_potfiles_in(
                UncontainedBackend("1"), os.path.join(pkg_dir, "po")
            )
        )

    def test_check_potfiles_in_wrong_directory(self):
        # A non-existent directory fails the check gracefully with False.
        pkg_dir = self.prepare_package("intltool_single_ok")
        self.assertFalse(
            check_potfiles_in(
                UncontainedBackend("1"), os.path.join(pkg_dir, "foo")
            )
        )

    def test_find_intltool_dirs(self):
        # Complete run: every directory with an intact intltool structure
        # is reported.
        pkg_dir = self.prepare_package("intltool_full_ok")
        self.assertEqual(
            ["po-module1", "po-module2"],
            find_intltool_dirs(UncontainedBackend("1"), pkg_dir),
        )

    def test_find_intltool_dirs_broken(self):
        # Complete run: a module with a missing source file is skipped.
        pkg_dir = self.prepare_package("intltool_full_ok")
        os.remove(os.path.join(pkg_dir, "src/module1/sourcefile1.c"))
        self.assertEqual(
            ["po-module2"],
            find_intltool_dirs(UncontainedBackend("1"), pkg_dir),
        )
+
+
+class TestIntltoolDomain(TestCase, SetupTestPackageMixin):
+    def test_get_translation_domain_makevars(self):
+        # Find a translation domain in Makevars.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_domain_makevars")
+        self.assertEqual(
+            "translationdomain",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_makevars_subst_1(self):
+        # Find a translation domain in Makevars, substituted from
+        # Makefile.in.in.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package(
+            "intltool_domain_base",
+            {
+                "po/Makefile.in.in": "PACKAGE=packagename-in-in\n",
+                "po/Makevars": "DOMAIN = $(PACKAGE)\n",
+            },
+        )
+        self.assertEqual(
+            "packagename-in-in",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_makevars_subst_2(self):
+        # Find a translation domain in Makevars, substituted from
+        # configure.ac.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package(
+            "intltool_domain_base",
+            {
+                "configure.ac": "PACKAGE=packagename-ac\n",
+                "po/Makefile.in.in": "# No domain here.\n",
+                "po/Makevars": "DOMAIN = $(PACKAGE)\n",
+            },
+        )
+        self.assertEqual(
+            "packagename-ac",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_makefile_in_in(self):
+        # Find a translation domain in Makefile.in.in.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_domain_makefile_in_in")
+        self.assertEqual(
+            "packagename-in-in",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_configure_ac(self):
+        # Find a translation domain in configure.ac.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_domain_configure_ac")
+        self.assertEqual(
+            "packagename-ac",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def prepare_ac_init(self, parameters, extra_files=None):
+        # Prepare test for various permutations of AC_INIT parameters
+        configure_ac_content = (
+            dedent(
+                """
+            AC_INIT(%s)
+            GETTEXT_PACKAGE=AC_PACKAGE_NAME
+            """
+            )
+            % parameters
+        )
+        files = {"configure.ac": configure_ac_content}
+        if extra_files is not None:
+            files.update(extra_files)
+        return self.prepare_package("intltool_domain_base", files)
+
+    def test_get_translation_domain_configure_ac_init(self):
+        # Find a translation domain in configure.ac in AC_INIT.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_ac_init(
+            "packagename-ac-init, 1.0, http://bug.org";
+        )
+        self.assertEqual(
+            "packagename-ac-init",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_configure_ac_init_single_param(self):
+        # Find a translation domain in configure.ac in AC_INIT.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_ac_init("[Just 1 param]")
+        self.assertIsNone(
+            get_translation_domain(backend, os.path.join(package_dir, "po"))
+        )
+
+    def test_get_translation_domain_configure_ac_init_brackets(self):
+        # Find a translation domain in configure.ac in AC_INIT with brackets.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_ac_init(
+            "[packagename-ac-init], 1.0, http://bug.org";
+        )
+        self.assertEqual(
+            "packagename-ac-init",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_configure_ac_init_tarname(self):
+        # Find a translation domain in configure.ac in AC_INIT tar name
+        # parameter.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_ac_init(
+            "[Package name], 1.0, http://bug.org, [package-tarname]"
+        )
+        self.assertEqual(
+            "package-tarname",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_configure_ac_init_multiline(self):
+        # Find a translation domain in configure.ac in AC_INIT when it
+        # spans multiple lines.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_ac_init(
+            "[packagename-ac-init],\n    1.0,\n    http://bug.org";
+        )
+        self.assertEqual(
+            "packagename-ac-init",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_configure_ac_init_multiline_tarname(self):
+        # Find a translation domain in configure.ac in AC_INIT tar name
+        # parameter that is on a different line.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_ac_init(
+            "[Package name], 1.0,\n    http://bug.org, [package-tarname]"
+        )
+        self.assertEqual(
+            "package-tarname",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_substitute_package_from_ac_init(self):
+        # PACKAGE is substituted from AC_INIT parameters as a fallback.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_ac_init(
+            "[packagename-ac-init], 1.0, http://bug.org";,
+            {"po/Makevars": "DOMAIN = $(PACKAGE)\n"},
+        )
+        self.assertEqual(
+            "packagename-ac-init",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_configure_in(self):
+        # Find a translation domain in configure.in.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_domain_configure_in")
+        self.assertEqual(
+            "packagename-in",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_makefile_in_in_substitute(self):
+        # Find a translation domain in Makefile.in.in with substitution from
+        # configure.ac.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package(
+            "intltool_domain_makefile_in_in_substitute"
+        )
+        self.assertEqual(
+            "domainname-ac-in-in",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_makefile_in_in_substitute_same_name(self):
+        # Find a translation domain in Makefile.in.in with substitution from
+        # configure.ac from a variable with the same name as in
+        # Makefile.in.in.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package(
+            "intltool_domain_makefile_in_in_substitute_same_name"
+        )
+        self.assertEqual(
+            "packagename-ac-in-in",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_makefile_in_in_substitute_same_file(self):
+        # Find a translation domain in Makefile.in.in with substitution from
+        # the same file, i.e. the referenced variable is defined in
+        # Makefile.in.in itself rather than in configure.ac/configure.in.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package(
+            "intltool_domain_makefile_in_in_substitute_same_file"
+        )
+        self.assertEqual(
+            "domain-in-in-in-in",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+    def test_get_translation_domain_makefile_in_in_substitute_broken(self):
+        # Find no translation domain in Makefile.in.in when the substitution
+        # cannot be fulfilled: an unresolvable variable must yield None
+        # rather than a partially-substituted domain name.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package(
+            "intltool_domain_makefile_in_in_substitute_broken"
+        )
+        self.assertIsNone(
+            get_translation_domain(backend, os.path.join(package_dir, "po"))
+        )
+
+    def test_get_translation_domain_configure_in_substitute_version(self):
+        # Find a translation domain in configure.in with Makefile-style
+        # substitution from the same file (the "42" in the expected value
+        # comes from the substituted version variable).
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package(
+            "intltool_domain_configure_in_substitute_version"
+        )
+        self.assertEqual(
+            "domainname-in42",
+            get_translation_domain(backend, os.path.join(package_dir, "po")),
+        )
+
+
+class TestGenerateTemplates(TestCase, SetupTestPackageMixin):
+    """Tests for PO template generation (`generate_pot`/`generate_pots`).
+
+    Each test prepares an intltool fixture package and checks both the
+    returned domain name(s) and that the expected .pot file(s) exist on
+    disk afterwards.
+    """
+
+    def test_generate_pot(self):
+        # Generate a given PO template.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_full_ok")
+        self.assertEqual(
+            "module1",
+            generate_pot(
+                backend, os.path.join(package_dir, "po-module1"), "module1"
+            ),
+            "PO template generation failed.",
+        )
+        expected_path = "po-module1/module1.pot"
+        self.assertTrue(
+            os.access(os.path.join(package_dir, expected_path), os.F_OK),
+            "Generated PO template '%s' not found." % expected_path,
+        )
+
+    def test_generate_pot_no_domain(self):
+        # Generate a generic PO template: passing None as the domain falls
+        # back to the default "messages" domain.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_full_ok")
+        self.assertEqual(
+            "messages",
+            generate_pot(
+                backend, os.path.join(package_dir, "po-module1"), None
+            ),
+            "PO template generation failed.",
+        )
+        expected_path = "po-module1/messages.pot"
+        self.assertTrue(
+            os.access(os.path.join(package_dir, expected_path), os.F_OK),
+            "Generated PO template '%s' not found." % expected_path,
+        )
+
+    def test_generate_pot_empty_domain(self):
+        # Generate a generic PO template: an empty-string domain behaves
+        # like None and falls back to "messages".
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_full_ok")
+        self.assertEqual(
+            "messages",
+            generate_pot(backend, os.path.join(package_dir, "po-module1"), ""),
+            "PO template generation failed.",
+        )
+        expected_path = "po-module1/messages.pot"
+        self.assertTrue(
+            os.access(os.path.join(package_dir, expected_path), os.F_OK),
+            "Generated PO template '%s' not found." % expected_path,
+        )
+
+    def test_generate_pot_not_intltool(self):
+        # Fail when not an intltool setup.
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_full_ok")
+        # Cripple the setup: without POTFILES.in this is no longer a valid
+        # intltool po directory, so generation must return None.
+        os.remove(os.path.join(package_dir, "po-module1/POTFILES.in"))
+        self.assertIsNone(
+            generate_pot(
+                backend, os.path.join(package_dir, "po-module1"), "nothing"
+            ),
+            "PO template generation should have failed.",
+        )
+        not_expected_path = "po-module1/nothing.pot"
+        self.assertFalse(
+            os.access(os.path.join(package_dir, not_expected_path), os.F_OK),
+            "Not expected PO template '%s' generated." % not_expected_path,
+        )
+
+    def test_generate_pots(self):
+        # Generate all PO templates in the package (one per po-module*
+        # directory in the fixture).
+        backend = UncontainedBackend("1")
+        package_dir = self.prepare_package("intltool_full_ok")
+        expected_paths = [
+            "po-module1/packagename-module1.pot",
+            "po-module2/packagename-module2.pot",
+        ]
+        pots_list = generate_pots(backend, package_dir)
+        self.assertEqual(expected_paths, pots_list)
+        for expected_path in expected_paths:
+            self.assertTrue(
+                os.access(os.path.join(package_dir, expected_path), os.F_OK),
+                "Generated PO template '%s' not found." % expected_path,
+            )
+
+
+class TestConfigFile(TestCase):
+    """Tests for `ConfigFile` variable/function parsing and quote stripping."""
+
+    def _makeConfigFile(self, text):
+        """Create a `ConfigFile` containing `text`."""
+        return ConfigFile(StringIO(dedent(text)))
+
+    def test_getVariable_smoke(self):
+        configfile = self._makeConfigFile(
+            """
+            A = 1
+            B = 2
+            C = 3
+            """
+        )
+        self.assertEqual("1", configfile.getVariable("A"))
+        self.assertEqual("2", configfile.getVariable("B"))
+        self.assertEqual("3", configfile.getVariable("C"))
+
+    def test_getVariable_exists(self):
+        configfile = self._makeConfigFile("DDD=dd.d")
+        self.assertEqual("dd.d", configfile.getVariable("DDD"))
+
+    def test_getVariable_ignores_mere_mention(self):
+        # A bare occurrence of the name is not a definition; only the
+        # "NAME = value" line counts.
+        configfile = self._makeConfigFile(
+            """
+            CCC
+            CCC = ccc # (this is the real definition)
+            CCC
+            """
+        )
+        self.assertEqual("ccc", configfile.getVariable("CCC"))
+
+    def test_getVariable_ignores_irrelevancies(self):
+        # Unrelated lines (noise, function calls, lower-case names) do not
+        # disturb variable lookup.
+        configfile = self._makeConfigFile(
+            """
+            A = a
+            ===
+            blah
+            FOO(n, m)
+            a = case-insensitive
+
+            Z = z
+            """
+        )
+        self.assertEqual("a", configfile.getVariable("A"))
+        self.assertEqual("z", configfile.getVariable("Z"))
+
+    def test_getVariable_exists_spaces_comment(self):
+        configfile = self._makeConfigFile("CCC = ccc # comment")
+        self.assertEqual("ccc", configfile.getVariable("CCC"))
+
+    def test_getVariable_empty(self):
+        configfile = self._makeConfigFile("AAA=")
+        self.assertEqual("", configfile.getVariable("AAA"))
+
+    def test_getVariable_empty_spaces(self):
+        configfile = self._makeConfigFile("BBB = ")
+        self.assertEqual("", configfile.getVariable("BBB"))
+
+    def test_getVariable_nonexistent(self):
+        configfile = self._makeConfigFile("X = y")
+        self.assertIsNone(configfile.getVariable("FFF"))
+
+    def test_getVariable_broken(self):
+        # A definition split across lines is not recognized.
+        configfile = self._makeConfigFile("EEE \n= eee")
+        self.assertIsNone(configfile.getVariable("EEE"))
+
+    def test_getVariable_strips_quotes(self):
+        # Quotes get stripped off variables.
+        configfile = self._makeConfigFile("QQQ = 'qqq'")
+        self.assertEqual("qqq", configfile.getVariable("QQQ"))
+
+        # This is done by invoking _stripQuotes (tested separately).
+        configfile._stripQuotes = FakeMethod(result="foo")
+        self.assertEqual("foo", configfile.getVariable("QQQ"))
+        self.assertNotEqual(0, configfile._stripQuotes.call_count)
+
+    def test_getFunctionParams_single(self):
+        configfile = self._makeConfigFile("FUNC_1(param1)")
+        self.assertEqual(["param1"], configfile.getFunctionParams("FUNC_1"))
+
+    def test_getFunctionParams_multiple(self):
+        # Parameters are split on commas and surrounding whitespace is
+        # trimmed.
+        configfile = self._makeConfigFile("FUNC_2(param1, param2, param3 )")
+        self.assertEqual(
+            ["param1", "param2", "param3"],
+            configfile.getFunctionParams("FUNC_2"),
+        )
+
+    def test_getFunctionParams_multiline_indented(self):
+        configfile = self._makeConfigFile(
+            """
+            ML_FUNC_1(param1,
+                param2, param3)
+            """
+        )
+        self.assertEqual(
+            ["param1", "param2", "param3"],
+            configfile.getFunctionParams("ML_FUNC_1"),
+        )
+
+    def test_getFunctionParams_multiline_not_indented(self):
+        configfile = self._makeConfigFile(
+            """
+            ML_FUNC_2(
+            param1,
+            param2)
+            """
+        )
+        self.assertEqual(
+            ["param1", "param2"], configfile.getFunctionParams("ML_FUNC_2")
+        )
+
+    def test_getFunctionParams_strips_quotes(self):
+        # Quotes get stripped off function parameters.
+        configfile = self._makeConfigFile('FUNC("param")')
+        self.assertEqual(["param"], configfile.getFunctionParams("FUNC"))
+
+        # This is done by invoking _stripQuotes (tested separately).
+        configfile._stripQuotes = FakeMethod(result="arg")
+        self.assertEqual(["arg"], configfile.getFunctionParams("FUNC"))
+        self.assertNotEqual(0, configfile._stripQuotes.call_count)
+
+    def test_stripQuotes_unquoted(self):
+        # _stripQuotes leaves unquoted identifiers intact.
+        configfile = self._makeConfigFile("")
+        self.assertEqual("hello", configfile._stripQuotes("hello"))
+
+    def test_stripQuotes_empty(self):
+        configfile = self._makeConfigFile("")
+        self.assertEqual("", configfile._stripQuotes(""))
+
+    def test_stripQuotes_single_quotes(self):
+        # Single quotes are stripped.
+        configfile = self._makeConfigFile("")
+        self.assertEqual("x", configfile._stripQuotes("'x'"))
+
+    def test_stripQuotes_double_quotes(self):
+        # Double quotes are stripped.
+        configfile = self._makeConfigFile("")
+        self.assertEqual("y", configfile._stripQuotes('"y"'))
+
+    def test_stripQuotes_bracket_quotes(self):
+        # Brackets are stripped (m4/autoconf-style quoting).
+        configfile = self._makeConfigFile("")
+        self.assertEqual("z", configfile._stripQuotes("[z]"))
+
+    def test_stripQuotes_opening_brackets(self):
+        # An opening bracket must be matched by a closing one.
+        configfile = self._makeConfigFile("")
+        self.assertEqual("[x[", configfile._stripQuotes("[x["))
+
+    def test_stripQuotes_closing_brackets(self):
+        # A closing bracket is not accepted as an opening quote.
+        configfile = self._makeConfigFile("")
+        self.assertEqual("]x]", configfile._stripQuotes("]x]"))
+
+    def test_stripQuotes_multiple(self):
+        # Only a single layer of quotes is stripped.
+        configfile = self._makeConfigFile("")
+        self.assertEqual('"n"', configfile._stripQuotes("'\"n\"'"))
+
+    def test_stripQuotes_single_quote(self):
+        # A string consisting of just one quote is not stripped.
+        configfile = self._makeConfigFile("")
+        self.assertEqual("'", configfile._stripQuotes("'"))
+
+    def test_stripQuotes_mismatched(self):
+        # Mismatched quotes are not stripped.
+        configfile = self._makeConfigFile("")
+        self.assertEqual("'foo\"", configfile._stripQuotes("'foo\""))
+
+    def test_stripQuotes_unilateral(self):
+        # A quote that's only on one end doesn't get stripped.
+        configfile = self._makeConfigFile("")
+        self.assertEqual('"foo', configfile._stripQuotes('"foo'))
diff --git a/lpbuildd/proxy.py b/lpbuildd/proxy.py
new file mode 100644
index 0000000..1a05ff3
--- /dev/null
+++ b/lpbuildd/proxy.py
@@ -0,0 +1,264 @@
+# Copyright 2015-2021 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import base64
+import io
+from urllib.parse import urlparse
+
+from twisted.application import strports
+from twisted.internet import reactor
+from twisted.internet.interfaces import IHalfCloseableProtocol
+from twisted.python.compat import intToBytes
+from twisted.web import http, proxy
+from zope.interface import implementer
+
+from lpbuildd.util import RevokeProxyTokenError, revoke_proxy_token
+
+
+class BuilderProxyClient(proxy.ProxyClient):
+    """Upstream side of the builder proxy: talks to the remote proxy.
+
+    `father` is the originating `BuilderProxyRequest`; data flows both
+    ways between it and this client.
+    """
+
+    def __init__(self, command, rest, version, headers, data, father):
+        proxy.ProxyClient.__init__(
+            self, command, rest, version, headers, data, father
+        )
+        # Why doesn't ProxyClient at least store this?
+        self.version = version
+        # We must avoid calling self.father.finish in the event that its
+        # connection was already lost, i.e. if the original client
+        # disconnects first (which is particularly likely in the case of
+        # CONNECT).
+        d = self.father.notifyFinish()
+        d.addBoth(self.requestFinished)
+
+    def connectionMade(self):
+        # Let the request start forwarding buffered data to us.
+        proxy.ProxyClient.connectionMade(self)
+        self.father.setChildClient(self)
+
+    def sendCommand(self, command, path):
+        # For some reason, HTTPClient.sendCommand doesn't preserve the
+        # protocol version.
+        self.transport.writeSequence(
+            [command, b" ", path, b" ", self.version, b"\r\n"]
+        )
+
+    def handleEndHeaders(self):
+        # Response headers are complete; mirror them to the client side.
+        self.father.handleEndHeaders()
+
+    def sendData(self, data):
+        # Forward request body bytes upstream.
+        self.transport.write(data)
+
+    def endData(self):
+        # Half-close the write side so the remote proxy sees end-of-request
+        # while we can still read its response.
+        if self.transport is not None:
+            self.transport.loseWriteConnection()
+
+    def requestFinished(self, result):
+        self._finished = True
+        self.transport.loseConnection()
+
+
+class BuilderProxyClientFactory(proxy.ProxyClientFactory):
+    # Factory for the upstream connection; only overrides the protocol.
+    protocol = BuilderProxyClient
+
+
+class BuilderProxyRequest(http.Request):
+    """Client-facing request that streams its body to a `BuilderProxyClient`.
+
+    Body data that arrives before the upstream connection exists is held
+    in `_request_buffer` and flushed once `setChildClient` is called.
+    """
+
+    # Upstream BuilderProxyClient once connected; None until then.
+    child_client = None
+    # BytesIO holding body data received before the upstream exists.
+    _request_buffer = None
+    # True once the client has finished sending request data.
+    _request_data_done = False
+
+    def setChildClient(self, child_client):
+        # Upstream is ready: flush anything buffered, and propagate
+        # end-of-data if the client already finished.
+        self.child_client = child_client
+        if self._request_buffer is not None:
+            self.child_client.sendData(self._request_buffer.getvalue())
+            self._request_buffer = None
+        if self._request_data_done:
+            self.child_client.endData()
+
+    def allHeadersReceived(self, command, path, version):
+        # Normally done in `requestReceived`, but we disable that since it
+        # does other things we don't want.
+        self.method, self.uri, self.clientproto = command, path, version
+        self.client = self.channel.transport.getPeer()
+        self.host = self.channel.transport.getHost()
+
+        remote_parsed = urlparse(self.channel.factory.remote_url)
+        request_parsed = urlparse(path)
+        headers = self.getAllHeaders().copy()
+        if b"host" not in headers and request_parsed.netloc:
+            headers[b"host"] = request_parsed.netloc
+        if remote_parsed.username:
+            # Forward the remote proxy's credentials as Basic auth.
+            auth = (
+                remote_parsed.username + ":" + remote_parsed.password
+            ).encode("ASCII")
+            authHeader = b"Basic " + base64.b64encode(auth)
+            headers[b"proxy-authorization"] = authHeader
+        self.client_factory = BuilderProxyClientFactory(
+            command, path, version, headers, b"", self
+        )
+        reactor.connectTCP(
+            remote_parsed.hostname, remote_parsed.port, self.client_factory
+        )
+
+    def requestReceived(self, command, path, version):
+        # We do most of our work in `allHeadersReceived` instead.
+        pass
+
+    def rawDataReceived(self, data):
+        # Stream body bytes upstream if connected; otherwise buffer them.
+        if self.child_client is not None:
+            if not self._request_data_done:
+                self.child_client.sendData(data)
+        else:
+            if self._request_buffer is None:
+                self._request_buffer = io.BytesIO()
+            self._request_buffer.write(data)
+
+    def handleEndHeaders(self):
+        # Cut-down version of Request.write.  We must avoid switching to
+        # chunked encoding for the sake of CONNECT; since our actual
+        # response data comes from another proxy, we can cut some corners.
+        if self.startedWriting:
+            return
+        self.startedWriting = 1
+        lines = []
+        # NOTE(review): twisted.python.compat.intToBytes is deprecated in
+        # newer Twisted releases — confirm the pinned Twisted version still
+        # provides it.
+        lines.append(
+            self.clientproto
+            + b" "
+            + intToBytes(self.code)
+            + b" "
+            + self.code_message
+            + b"\r\n"
+        )
+        for name, values in self.responseHeaders.getAllRawHeaders():
+            for value in values:
+                lines.extend([name, b": ", value, b"\r\n"])
+        lines.append(b"\r\n")
+        self.transport.writeSequence(lines)
+
+    def write(self, data):
+        # Keep the idle timer alive while response data is flowing.
+        if self.channel is not None:
+            self.channel.resetTimeout()
+        http.Request.write(self, data)
+
+    def endData(self):
+        # Called when the client side has no more request data to send.
+        if self.child_client is not None:
+            self.child_client.endData()
+        self._request_data_done = True
+
+
+@implementer(IHalfCloseableProtocol)
+class BuilderProxy(http.HTTPChannel):
+    """A channel that streams request data.
+
+    The stock HTTPChannel isn't quite suitable for our needs, because it
+    expects to read the entire request data before passing control to the
+    request.  This doesn't work well for CONNECT.
+    """
+
+    requestFactory = BuilderProxyRequest
+
+    def checkPersistence(self, request, version):
+        # ProxyClient.__init__ forces "Connection: close".
+        return False
+
+    def allHeadersReceived(self):
+        http.HTTPChannel.allHeadersReceived(self)
+        self.requests[-1].allHeadersReceived(
+            self._command, self._path, self._version
+        )
+        if self._command == b"CONNECT":
+            # This is a lie, but we don't want HTTPChannel to decide that
+            # the request is finished just because a CONNECT request
+            # (naturally) has no Content-Length.
+            self.length = -1
+
+    def rawDataReceived(self, data):
+        # Stream body bytes straight to the current request.
+        self.resetTimeout()
+        if self.requests:
+            self.requests[-1].rawDataReceived(data)
+
+    def readConnectionLost(self):
+        # Client half-closed its side: signal end-of-request-data.
+        for request in self.requests:
+            request.endData()
+
+    def writeConnectionLost(self):
+        pass
+
+
+class BuilderProxyFactory(http.HTTPFactory):
+    """HTTP factory whose channels forward to `remote_url` and log to the
+    build log via `manager`."""
+
+    protocol = BuilderProxy
+
+    def __init__(self, manager, remote_url, *args, **kwargs):
+        http.HTTPFactory.__init__(self, *args, **kwargs)
+        self.manager = manager
+        self.remote_url = remote_url
+
+    def log(self, request):
+        # Log requests to the build log rather than to Twisted.
+        # Reimplement log formatting because there's no point logging the IP
+        # here.
+        referrer = http._escape(request.getHeader(b"referer") or b"-")
+        agent = http._escape(request.getHeader(b"user-agent") or b"-")
+        line = (
+            '%(timestamp)s "%(method)s %(uri)s %(protocol)s" '
+            '%(code)d %(length)s "%(referrer)s" "%(agent)s"\n'
+            % {
+                "timestamp": self._logDateTime,
+                "method": http._escape(request.method),
+                "uri": http._escape(request.uri),
+                "protocol": http._escape(request.clientproto),
+                "code": request.code,
+                "length": request.sentLength or "-",
+                "referrer": referrer,
+                "agent": agent,
+            }
+        )
+        self.manager._builder.log(line.encode("UTF-8"))
+
+
+class BuildManagerProxyMixin:
+    @property
+    def _use_fetch_service(self):
+        return hasattr(self, "use_fetch_service") and getattr(
+            self, "use_fetch_service"
+        )
+
+    def startProxy(self):
+        """Start the local builder proxy, if necessary.
+
+        This starts an internal proxy that stands before the Builder
+        Proxy/Fetch Service, so build systems, which do not comply
+        with standard `http(s)_proxy` environment variables, would
+        still work with the builder proxy.
+        """
+        if not self.proxy_url:
+            return []
+        proxy_port = self._builder._config.get("builder", "proxyport")
+        proxy_factory = BuilderProxyFactory(self, self.proxy_url, timeout=60)
+        self.proxy_service = strports.service(
+            "tcp:%s" % proxy_port, proxy_factory
+        )
+        self.proxy_service.setServiceParent(self._builder.service)
+        if hasattr(self.backend, "ipv4_network"):
+            proxy_host = self.backend.ipv4_network.ip
+        else:
+            proxy_host = "localhost"
+        return ["--proxy-url", f"http://{proxy_host}:{proxy_port}/";]
+
+    def stopProxy(self):
+        """Stop the internal local proxy (see `startProxy`), if necessary."""
+        if self.proxy_service is None:
+            return
+        self.proxy_service.disownServiceParent()
+        self.proxy_service = None
+
+    def revokeProxyToken(self):
+        """Revoke builder proxy token."""
+        if not self.revocation_endpoint:
+            return
+        self._builder.log("Revoking proxy token...\n")
+        try:
+            revoke_proxy_token(
+                self.proxy_url,
+                self.revocation_endpoint,
+                self._use_fetch_service,
+            )
+        except RevokeProxyTokenError as e:
+            self._builder.log(f"{e}\n")
diff --git a/lpbuildd/snap.py b/lpbuildd/snap.py
new file mode 100644
index 0000000..af4d165
--- /dev/null
+++ b/lpbuildd/snap.py
@@ -0,0 +1,166 @@
+# Copyright 2015-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os
+from configparser import NoOptionError, NoSectionError
+
+from lpbuildd.debian import DebianBuildManager, DebianBuildState
+from lpbuildd.proxy import BuildManagerProxyMixin
+
+RETCODE_SUCCESS = 0
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+class SnapBuildState(DebianBuildState):
+    # Single extra state for the snap build step.
+    BUILD_SNAP = "BUILD_SNAP"
+
+
+class SnapBuildManager(BuildManagerProxyMixin, DebianBuildManager):
+    """Build a snap."""
+
+    backend_name = "lxd"
+    initial_build_state = SnapBuildState.BUILD_SNAP
+
+    @property
+    def needs_sanitized_logs(self):
+        # Proxy credentials may appear in build output; always sanitize.
+        return True
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot.
+
+        Stores the snap-specific `extra_args` on the instance before
+        delegating to `DebianBuildManager.initiate`.
+        """
+        self.name = extra_args["name"]
+        self.channels = extra_args.get("channels", {})
+        self.build_request_id = extra_args.get("build_request_id")
+        self.build_request_timestamp = extra_args.get(
+            "build_request_timestamp"
+        )
+        self.build_url = extra_args.get("build_url")
+        self.branch = extra_args.get("branch")
+        self.git_repository = extra_args.get("git_repository")
+        self.git_path = extra_args.get("git_path")
+        self.use_fetch_service = extra_args.get("use_fetch_service")
+        self.proxy_url = extra_args.get("proxy_url")
+        # currently only used to transport the mitm certificate
+        self.secrets = extra_args.get("secrets")
+        self.revocation_endpoint = extra_args.get("revocation_endpoint")
+        self.build_source_tarball = extra_args.get(
+            "build_source_tarball", False
+        )
+        self.private = extra_args.get("private", False)
+        self.proxy_service = None
+        self.target_architectures = extra_args.get("target_architectures")
+        self.disable_proxy_after_pull = extra_args.get(
+            "disable_proxy_after_pull"
+        )
+
+        super().initiate(files, chroot, extra_args)
+
+    def doRunBuild(self):
+        """Run the process to build the snap.
+
+        Assembles the buildsnap command line from the stored extra_args
+        and starts it in the target environment.
+        """
+        args = []
+        for snap, channel in sorted(self.channels.items()):
+            args.extend(["--channel", f"{snap}={channel}"])
+        if self.build_request_id:
+            args.extend(["--build-request-id", str(self.build_request_id)])
+        if self.build_request_timestamp:
+            args.extend(
+                ["--build-request-timestamp", self.build_request_timestamp]
+            )
+        if self.build_url:
+            args.extend(["--build-url", self.build_url])
+        args.extend(self.startProxy())
+        if self.revocation_endpoint:
+            args.extend(["--revocation-endpoint", self.revocation_endpoint])
+        if (
+            self.disable_proxy_after_pull
+            and self.proxy_url
+            and self.revocation_endpoint
+        ):
+            args.extend(
+                [
+                    "--upstream-proxy-url",
+                    self.proxy_url,
+                    "--disable-proxy-after-pull",
+                ]
+            )
+        if self.branch is not None:
+            args.extend(["--branch", self.branch])
+        if self.git_repository is not None:
+            args.extend(["--git-repository", self.git_repository])
+        if self.git_path is not None:
+            args.extend(["--git-path", self.git_path])
+        if self.build_source_tarball:
+            args.append("--build-source-tarball")
+        if self.private:
+            args.append("--private")
+        try:
+            snap_store_proxy_url = self._builder._config.get(
+                "proxy", "snapstore"
+            )
+            args.extend(["--snap-store-proxy-url", snap_store_proxy_url])
+        except (NoSectionError, NoOptionError):
+            # No snap store proxy configured; proceed without one.
+            pass
+        if self.target_architectures:
+            for arch in self.target_architectures:
+                args.extend(["--target-arch", arch])
+        if self.use_fetch_service:
+            # NOTE(review): underscores here while every other flag uses
+            # hyphens — confirm against buildsnap's argument parser.
+            args.append("--use_fetch_service")
+            # XXX 2024-04-17 jugmac00: I do not think we need to add checks
+            # whether this information is present, as otherwise the fetch
+            # service won't work anyway
+            args.extend(
+                [
+                    "--fetch-service-mitm-certificate",
+                    self.secrets["fetch_service_mitm_certificate"],
+                ]
+            )
+        args.append(self.name)
+        self.runTargetSubProcess("buildsnap", *args)
+
+    def iterate_BUILD_SNAP(self, retcode):
+        """Finished building the snap."""
+        self.stopProxy()
+        self.revokeProxyToken()
+        if retcode == RETCODE_SUCCESS:
+            print("Returning build status: OK")
+            return self.deferGatherResults()
+        elif (
+            retcode >= RETCODE_FAILURE_INSTALL
+            and retcode <= RETCODE_FAILURE_BUILD
+        ):
+            # Failure attributable to the build itself.
+            if not self.alreadyfailed:
+                self._builder.buildFail()
+                print("Returning build status: Build failed.")
+            self.alreadyfailed = True
+        else:
+            # Any other return code is treated as a builder failure.
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+                print("Returning build status: Builder failed.")
+            self.alreadyfailed = True
+        self.doReapProcesses(self._state)
+
+    def iterateReap_BUILD_SNAP(self, retcode):
+        """Finished reaping after building the snap."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def gatherResults(self):
+        """Gather the results of the build and add them to the file cache."""
+        output_path = os.path.join("/build", self.name)
+        if self.backend.path_exists(output_path):
+            for entry in sorted(self.backend.listdir(output_path)):
+                path = os.path.join(output_path, entry)
+                if self.backend.islink(path):
+                    # Skip symlinks; only real artefacts are collected.
+                    continue
+                # `.comp` files are the binary result of building snap
+                # components, see spec SD149.
+                if entry.endswith(
+                    (".snap", ".manifest", ".debug", ".dpkg.yaml", ".comp")
+                ):
+                    self.addWaitingFileFromBackend(path)
+        if self.build_source_tarball:
+            source_tarball_path = os.path.join(
+                "/build", "%s.tar.gz" % self.name
+            )
+            if self.backend.path_exists(source_tarball_path):
+                self.addWaitingFileFromBackend(source_tarball_path)
diff --git a/lpbuildd/sourcepackagerecipe.py b/lpbuildd/sourcepackagerecipe.py
new file mode 100644
index 0000000..00db21a
--- /dev/null
+++ b/lpbuildd/sourcepackagerecipe.py
@@ -0,0 +1,166 @@
+# Copyright 2010-2018 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+# pylint: disable-msg=E1002
+
+"""The manager class for building packages from recipes."""
+
+import os
+import re
+import subprocess
+
+from lpbuildd.builder import get_build_path
+from lpbuildd.debian import DebianBuildManager, DebianBuildState
+
+RETCODE_SUCCESS = 0
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD_TREE = 201
+RETCODE_FAILURE_INSTALL_BUILD_DEPS = 202
+RETCODE_FAILURE_BUILD_SOURCE_PACKAGE = 203
+
+
+def get_chroot_path(home, build_id, *extra):
+    """Return a path within the chroot.
+
+    :param home: The user's home directory.
+    :param build_id: The build_id of the build.
+    :param extra: Additional path elements.
+    """
+    # os.environ["HOME"][1:] strips the leading "/" so the home directory
+    # path can be joined below the chroot root.
+    return get_build_path(
+        home, build_id, "chroot-autobuild", os.environ["HOME"][1:], *extra
+    )
+
+
+class SourcePackageRecipeBuildState(DebianBuildState):
+    """The set of states that a recipe build can be in."""
+
+    # Single extra state for the recipe build step.
+    BUILD_RECIPE = "BUILD_RECIPE"
+
+
+class SourcePackageRecipeBuildManager(DebianBuildManager):
+    """Build a source package from a bzr-builder recipe."""
+
+    initial_build_state = SourcePackageRecipeBuildState.BUILD_RECIPE
+
+    def __init__(self, builder, buildid):
+        """Constructor.
+
+        :param builder: A builder.
+        :param buildid: The id of the build (a str).
+        """
+        DebianBuildManager.__init__(self, builder, buildid)
+        self.build_recipe_path = os.path.join(self._bin, "buildrecipe")
+
+    def initiate(self, files, chroot, extra_args):
+        """Initiate a build with a given set of files and chroot.
+
+        :param files: The files sent by the manager with the request.
+        :param chroot: The sha1sum of the chroot to use.
+        :param extra_args: A dict of extra arguments.
+        """
+        self.recipe_text = extra_args["recipe_text"]
+        self.suite = extra_args["suite"]
+        self.component = extra_args["ogrecomponent"]
+        self.author_name = extra_args["author_name"]
+        self.author_email = extra_args["author_email"]
+        self.archive_purpose = extra_args["archive_purpose"]
+        self.git = extra_args.get("git", False)
+
+        super().initiate(files, chroot, extra_args)
+
+    def doRunBuild(self):
+        """Run the build process to build the source package."""
+        work_dir = os.path.join(os.environ["HOME"], "work")
+        self.backend.run(["mkdir", "-p", work_dir])
+        # buildrecipe needs to be able to write directly to the work
+        # directory.  (That directory needs to be inside the chroot so that
+        # buildrecipe can run dpkg-buildpackage on it from inside the
+        # chroot.)
+        subprocess.run(
+            [
+                "sudo",
+                "chown",
+                "-R",
+                "buildd:",
+                get_chroot_path(self.home, self._buildid, "work"),
+            ],
+            check=True,
+        )
+        with self.backend.open(
+            os.path.join(work_dir, "recipe"), "w"
+        ) as recipe_file:
+            recipe_file.write(self.recipe_text)
+        args = ["buildrecipe"]
+        if self.git:
+            args.append("--git")
+        # NOTE(review): author_name is encoded to bytes while the other
+        # arguments stay str — presumably runSubProcess accepts the mix;
+        # confirm.
+        args.extend(
+            [
+                self._buildid,
+                self.author_name.encode("utf-8"),
+                self.author_email,
+                self.suite,
+                self.series,
+                self.component,
+                self.archive_purpose,
+            ]
+        )
+        self.runSubProcess(self.build_recipe_path, args)
+
+    def iterate_BUILD_RECIPE(self, retcode):
+        """Move from BUILD_RECIPE to the next logical state."""
+        if retcode == RETCODE_SUCCESS:
+            print("Returning build status: OK")
+            return self.deferGatherResults()
+        elif retcode == RETCODE_FAILURE_INSTALL_BUILD_DEPS:
+            # Try to extract the missing dependency from the log so the
+            # build can be marked DEPFAIL rather than plain failed.
+            if not self.alreadyfailed:
+                rx = (
+                    r"The following packages have unmet dependencies:\n"
+                    r".*: Depends: ([^ ]*( \([^)]*\))?)"
+                )
+                _, mo = self.searchLogContents([[rx, re.M]])
+                if mo:
+                    missing_dep = mo.group(1).decode("UTF-8", "replace")
+                    self._builder.depFail(missing_dep)
+                    print("Returning build status: DEPFAIL")
+                    print("Dependencies: " + missing_dep)
+                else:
+                    print("Returning build status: Build failed")
+                    self._builder.buildFail()
+            self.alreadyfailed = True
+        elif (
+            retcode >= RETCODE_FAILURE_INSTALL
+            and retcode <= RETCODE_FAILURE_BUILD_SOURCE_PACKAGE
+        ):
+            # XXX AaronBentley 2009-01-13: We should handle depwait separately
+            if not self.alreadyfailed:
+                self._builder.buildFail()
+                print("Returning build status: Build failed.")
+            self.alreadyfailed = True
+        else:
+            if not self.alreadyfailed:
+                self._builder.builderFail()
+                print("Returning build status: Builder failed.")
+            self.alreadyfailed = True
+        self.doReapProcesses(self._state)
+
+    def iterateReap_BUILD_RECIPE(self, retcode):
+        """Finished reaping after recipe building."""
+        self._state = DebianBuildState.UMOUNT
+        self.doUnmounting()
+
+    def getChangesFilename(self):
+        """Return the path to the changes file.
+
+        Returns None implicitly when no ``*_source.changes`` file exists.
+        """
+        work_path = get_build_path(self.home, self._buildid)
+        for name in os.listdir(work_path):
+            if name.endswith("_source.changes"):
+                return os.path.join(work_path, name)
+
+    def gatherResults(self):
+        """Gather the results of the build and add them to the file cache.
+
+        The primary file we care about is the .changes file.
+        The manifest is also a useful record.
+        """
+        DebianBuildManager.gatherResults(self)
+        self._builder.addWaitingFile(
+            get_build_path(self.home, self._buildid, "manifest")
+        )
diff --git a/lpbuildd/target/__init__.py b/lpbuildd/target/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lpbuildd/target/__init__.py
diff --git a/lpbuildd/target/apt.py b/lpbuildd/target/apt.py
new file mode 100644
index 0000000..3f9ddca
--- /dev/null
+++ b/lpbuildd/target/apt.py
@@ -0,0 +1,149 @@
+# Copyright 2009-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import os
+import subprocess
+import sys
+import time
+from textwrap import dedent
+
+from lpbuildd.target.operation import Operation
+
+logger = logging.getLogger(__name__)
+
+
class OverrideSourcesList(Operation):
    """Replace APT configuration inside the target with controlled settings.

    Writes sources.list, retry/phasing/proxy apt.conf snippets, and pin
    preferences that allow -proposed/-backports packages to be installed.
    """

    description = "Override sources.list in the target environment."

    @classmethod
    def add_arguments(cls, parser):
        super().add_arguments(parser)
        parser.add_argument(
            "--apt-proxy-url", metavar="URL", help="APT proxy URL"
        )
        parser.add_argument(
            "archives", metavar="ARCHIVE", nargs="+", help="sources.list lines"
        )

    def _install_text(self, path, text):
        """Write `text` to `path` inside the target, world-readable (0644)."""
        with self.backend.open(path, mode="w+") as config_file:
            config_file.write(text)
            os.fchmod(config_file.fileno(), 0o644)

    def run(self):
        logger.info("Overriding sources.list in build-%s", self.args.build_id)
        sources_text = "".join(
            archive + "\n" for archive in self.args.archives
        )
        self._install_text("/etc/apt/sources.list", sources_text)
        self._install_text(
            "/etc/apt/apt.conf.d/99retries", 'Acquire::Retries "3";\n'
        )
        # Versions of APT that support phased updates do this automatically
        # if running in a chroot, but builds may be running in a LXD
        # container instead.
        self._install_text(
            "/etc/apt/apt.conf.d/99phasing",
            'APT::Get::Always-Include-Phased-Updates "true";\n',
        )
        if self.args.apt_proxy_url is not None:
            self._install_text(
                "/etc/apt/apt.conf.d/99proxy",
                f'Acquire::http::Proxy "{self.args.apt_proxy_url}";\n',
            )
        for pocket in ("proposed", "backports"):
            pin_text = dedent(
                f"""\
                Package: *
                Pin: release a=*-{pocket}
                Pin-Priority: 500
                """
            )
            self._install_text(
                f"/etc/apt/preferences.d/{pocket}.pref", pin_text
            )
        return 0
+
+
class AddTrustedKeys(Operation):
    """Install additional APT trusted GPG keys inside the target.

    Reads ASCII-armored key material from stdin, dearmors it into
    /etc/apt/trusted.gpg.d/launchpad-buildd.gpg in the target, and echoes
    the imported keys to stdout for the build log.
    """

    description = "Write out new trusted keys."

    def __init__(self, args, parser):
        super().__init__(args, parser)
        # Streams are held as attributes rather than used directly —
        # presumably so tests can substitute in-memory buffers; confirm
        # against the test suite.
        self.input_file = sys.stdin.buffer
        self.show_keys_file = sys.stdout.buffer

    def run(self):
        """Add trusted keys from an input file."""
        logger.info("Adding trusted keys to build-%s", self.args.build_id)
        # We must read the input data before calling `backend.open`, since
        # it may call `lxc exec` and that apparently drains stdin.
        input_data = self.input_file.read()
        gpg_cmd = [
            "gpg",
            "--ignore-time-conflict",
            "--no-options",
            "--no-keyring",
        ]
        with self.backend.open(
            "/etc/apt/trusted.gpg.d/launchpad-buildd.gpg", mode="wb+"
        ) as keyring:
            # Convert the armored input into a binary keyring file.
            subprocess.run(
                gpg_cmd + ["--dearmor"],
                input=input_data,
                stdout=keyring,
                check=True,
            )
            # Rewind so the second gpg invocation reads what we just wrote.
            keyring.seek(0)
            subprocess.check_call(
                gpg_cmd
                + ["--show-keys", "--keyid-format", "long", "--fingerprint"],
                stdin=keyring,
                stdout=self.show_keys_file,
            )
            os.fchmod(keyring.fileno(), 0o644)
        return 0
+
+
class Update(Operation):
    """Bring the target environment's package state up to date."""

    description = "Update the target environment."

    def run(self):
        """Run `apt-get update` (retrying once) and then dist-upgrade."""
        logger.info("Updating target for build %s", self.args.build_id)
        apt_env = {
            "LANG": "C",
            "DEBIAN_FRONTEND": "noninteractive",
            "TTY": "unknown",
        }
        apt_get = "/usr/bin/apt-get"
        with open("/dev/null") as devnull:
            update_cmd = [apt_get, "-uy", "update"]
            try:
                self.backend.run(update_cmd, env=apt_env, stdin=devnull)
            except subprocess.CalledProcessError:
                # Archive fetches fail transiently; retry once.  A second
                # failure propagates to the caller.
                logger.warning("Waiting 15 seconds and trying again ...")
                time.sleep(15)
                self.backend.run(update_cmd, env=apt_env, stdin=devnull)
            upgrade_cmd = [
                apt_get,
                "-o",
                "DPkg::Options::=--force-confold",
                "-uy",
                "--purge",
                "dist-upgrade",
            ]
            self.backend.run(upgrade_cmd, env=apt_env, stdin=devnull)
        return 0
diff --git a/lpbuildd/target/backend.py b/lpbuildd/target/backend.py
new file mode 100644
index 0000000..371af8f
--- /dev/null
+++ b/lpbuildd/target/backend.py
@@ -0,0 +1,254 @@
+# Copyright 2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os.path
+import subprocess
+import tempfile
+from contextlib import contextmanager
+from pathlib import Path
+from shutil import rmtree
+
+
+class BackendException(Exception):
+    pass
+
+
class InvalidBuildFilePath(Exception):
    """Raised when a build file path escapes the build directory."""

    pass


def check_path_escape(buildd_path, path_to_check):
    """Check the build file path doesn't escape the build directory.

    :param buildd_path: the base build directory.
    :param path_to_check: a path to validate, interpreted relative to
        `buildd_path` (it may contain ".." segments or symlinks).
    :raises InvalidBuildFilePath: if the fully-resolved path lies outside
        `buildd_path`.
    """
    base_path = os.path.realpath(buildd_path)
    build_file_path = os.path.realpath(
        os.path.join(buildd_path, path_to_check)
    )
    # Compare whole path components, not raw string prefixes:
    # os.path.commonprefix works character-by-character, so it would
    # wrongly accept e.g. "/build/foobar" as being inside "/build/foo".
    if build_file_path != base_path and not build_file_path.startswith(
        base_path + os.sep
    ):
        raise InvalidBuildFilePath("Invalid build file path.")
+
+
class Backend:
    """A backend implementation for the environment where we run builds.

    Concrete subclasses (chroot, LXD, and test fakes — see `make_backend`)
    implement `create`, `start`, `run`, `copy_in`, `copy_out`, and `stop`;
    the remaining helpers here are built on top of `run`.
    """

    # Whether snapd works inside this backend, i.e. snaps can be installed.
    supports_snapd = False

    def __init__(self, build_id, series=None, arch=None, constraints=None):
        """
        :param build_id: identifier for this build; also used to derive the
            host-side build directory name.
        :param series: optional distro series name.
        :param arch: optional architecture name.
        :param constraints: optional list of builder constraints; defaults
            to an empty list.
        """
        self.build_id = build_id
        self.series = series
        self.arch = arch
        self.constraints = constraints or []
        # Host-side working directory for this build.
        self.build_path = os.path.join(os.environ["HOME"], "build-" + build_id)

    def create(self, image_path, image_type):
        """Create the backend based on a base image.

        This puts the backend into a state where it is ready to be started.
        """
        raise NotImplementedError

    def start(self):
        """Start the backend.

        This puts the backend into a state where it can run commands.
        """
        raise NotImplementedError

    def run(
        self,
        args,
        cwd=None,
        env=None,
        input_text=None,
        get_output=False,
        echo=False,
        **kwargs,
    ):
        """Run a command in the target environment.

        :param args: the command and arguments to run.
        :param cwd: run the command in this working directory in the target.
        :param env: additional environment variables to set.
        :param input_text: input text to pass on the command's stdin.
        :param get_output: if True, return the output from the command.
        :param echo: if True, print the command before executing it, and
            print any output from the command if `get_output` is also True.
        :param kwargs: additional keyword arguments for `subprocess.Popen`.
        :raises subprocess.CalledProcessError: if the command fails
            (relied on by the helpers below — confirm against subclasses).
        """
        raise NotImplementedError

    def copy_in(self, source_path, target_path):
        """Copy a file into the target environment.

        The target file will be owned by root/root and have the same
        permission mode as the source file.

        :param source_path: the path to the file that should be copied from
            the host system.
        :param target_path: the path where the file should be installed
            inside the target environment, relative to the target
            environment's root.
        """
        raise NotImplementedError

    def copy_out(self, source_path, target_path):
        """Copy a file out of the target environment.

        The target file will have the same permission mode as the source
        file.

        :param source_path: the path to the file that should be copied,
            relative to the target environment's root.
        :param target_path: the path where the file should be installed in
            the host system.
        """
        raise NotImplementedError

    def path_exists(self, path):
        """Test whether a path exists in the target environment.

        Implemented by running `test -e` inside the target.

        :param path: the path to the file to test, relative to the target
            environment's root.
        """
        try:
            self.run(["test", "-e", path])
            return True
        except subprocess.CalledProcessError:
            return False

    def isdir(self, path):
        """Test whether a path is a directory in the target environment.

        Implemented by running `test -d` inside the target.

        :param path: the path to test, relative to the target environment's
            root.
        """
        try:
            self.run(["test", "-d", path])
            return True
        except subprocess.CalledProcessError:
            return False

    def islink(self, path):
        """Test whether a file is a symbolic link in the target environment.

        Implemented by running `test -h` inside the target.

        :param path: the path to the file to test, relative to the target
            environment's root.
        """
        try:
            self.run(["test", "-h", path])
            return True
        except subprocess.CalledProcessError:
            return False

    def find(self, path, max_depth=None, include_directories=True, name=None):
        """Find entries in `path`.

        :param path: the path to the directory to search.
        :param max_depth: do not descend more than this number of directory
            levels: as with find(1), 1 includes the contents of `path`, 2
            includes the contents of its subdirectories, etc.
        :param include_directories: include entries representing
            directories.
        :param name: only include entries whose name is equal to this.
        :return: a list of paths relative to `path` (find's %P format).
        """
        cmd = ["find", path, "-mindepth", "1"]
        if max_depth is not None:
            cmd.extend(["-maxdepth", str(max_depth)])
        if not include_directories:
            cmd.extend(["!", "-type", "d"])
        if name is not None:
            cmd.extend(["-name", name])
        # NUL-separated output so file names containing newlines survive.
        cmd.extend(["-printf", "%P\\0"])
        paths = self.run(cmd, get_output=True).split(b"\0")[:-1]
        # XXX cjwatson 2017-08-04: Use `os.fsdecode` instead once we're on
        # Python 3.
        return [p.decode("UTF-8") for p in paths]

    def listdir(self, path):
        """List a directory in the target environment.

        :param path: the path to the directory to list, relative to the
            target environment's root.
        :return: a list of entry names (depth-1 `find` results).
        """
        return self.find(path, max_depth=1)

    def is_package_available(self, package):
        """Test whether a package is available in the target environment.

        A package counts as available if `apt-cache show` emits a stanza
        for it; any `apt-cache` failure is treated as "not available".

        :param package: a binary package name.
        """
        try:
            with open("/dev/null", "w") as devnull:
                output = self.run(
                    ["apt-cache", "show", package],
                    get_output=True,
                    stderr=devnull,
                    universal_newlines=True,
                )
            return ("Package: %s" % package) in output.splitlines()
        except subprocess.CalledProcessError:
            return False

    def kill_processes(self):
        """Kill any processes left running in the target.

        This is allowed to do nothing if stopping the target will reliably
        kill all processes running in it.
        """
        # XXX cjwatson 2017-08-22: It might make sense to merge this into
        # `stop` later.
        pass

    def stop(self):
        """Stop the backend."""
        raise NotImplementedError

    def remove(self):
        """Remove the backend."""
        # sudo is needed here — presumably because the build tree can
        # contain root-owned files created by the target environment;
        # confirm against the backend subclasses.
        subprocess.check_call(["sudo", "rm", "-rf", self.build_path])

    @contextmanager
    def open(self, path: str, mode="r", **kwargs):
        """
        Provides access to the files in the target environment via a
        file-like object.

        The arguments are the same as those of the built-in `open` function.

        The file is staged in a temporary directory on the host: if it
        already exists in the target it is copied out first, and — for any
        mode other than "r"/"rb"/"rt" — it is copied back into the target
        when the context exits.  Note the copy-back happens in the finally
        clause, so it occurs even if the with-body raised.
        """
        tmp_dir = tempfile.mkdtemp()
        tmp_path = os.path.join(tmp_dir, Path(path).name)
        if self.path_exists(path):
            self.copy_out(path, tmp_path)
        tmp_file = open(tmp_path, mode=mode, **kwargs)
        try:
            yield tmp_file
        finally:
            tmp_file.close()
            if mode not in ("r", "rb", "rt"):
                self.copy_in(tmp_path, path)
            rmtree(tmp_dir)
+
+
def make_backend(name, build_id, series=None, arch=None, constraints=None):
    """Look up and instantiate the backend implementation called `name`.

    :param name: one of "chroot", "lxd", "fake", or "uncontained" (the
        latter two only for use in tests).
    :param build_id: the build id, passed through to the backend.
    :param series: optional distro series, passed through to the backend.
    :param arch: optional architecture, passed through to the backend.
    :param constraints: optional builder constraints, passed through.
    :raises KeyError: if `name` does not match any known backend.
    """
    # Backend modules are imported lazily so that only the selected
    # backend's dependencies are required.
    if name == "chroot":
        from lpbuildd.target.chroot import Chroot as backend_factory
    elif name == "lxd":
        from lpbuildd.target.lxd import LXD as backend_factory
    elif name == "fake":
        # Only for use in tests.
        from lpbuildd.tests.fakebuilder import FakeBackend as backend_factory
    elif name == "uncontained":
        # Only for use in tests.
        from lpbuildd.tests.fakebuilder import (
            UncontainedBackend as backend_factory,
        )
    else:
        raise KeyError("Unknown backend: %s" % name)
    return backend_factory(
        build_id, series=series, arch=arch, constraints=constraints
    )
diff --git a/lpbuildd/target/build_charm.py b/lpbuildd/target/build_charm.py
new file mode 100644
index 0000000..1bcbd1e
--- /dev/null
+++ b/lpbuildd/target/build_charm.py
@@ -0,0 +1,123 @@
+# Copyright 2021 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import os
+
+from lpbuildd.target.backend import check_path_escape
+from lpbuildd.target.build_snap import SnapChannelsAction
+from lpbuildd.target.operation import Operation
+from lpbuildd.target.proxy import BuilderProxyOperationMixin
+from lpbuildd.target.snapstore import SnapStoreOperationMixin
+from lpbuildd.target.vcs import VCSOperationMixin
+
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+logger = logging.getLogger(__name__)
+
+
class BuildCharm(
    BuilderProxyOperationMixin,
    VCSOperationMixin,
    SnapStoreOperationMixin,
    Operation,
):
    """Build a charm from a git or bzr branch using charmcraft."""

    description = "Build a charm."

    @classmethod
    def add_arguments(cls, parser):
        super().add_arguments(parser)
        # NOTE(review): the mutable default dict is shared with the parser,
        # and SnapChannelsAction mutates it in place; parsing twice with the
        # same parser would accumulate entries — confirm the parser is only
        # used once per process.
        parser.add_argument(
            "--channel",
            action=SnapChannelsAction,
            metavar="SNAP=CHANNEL",
            dest="channels",
            default={},
            help="install SNAP from CHANNEL",
        )
        parser.add_argument(
            "--build-path", default=".", help="location of charm to build."
        )
        parser.add_argument("name", help="name of charm to build")

    def __init__(self, args, parser):
        super().__init__(args, parser)
        # The charmcraft snap cannot see /build (see install()), so all
        # work happens under /home/buildd.
        self.buildd_path = os.path.join("/home/buildd", self.args.name)

    def install(self):
        """Install build dependencies, requested snaps, and charmcraft."""
        logger.info("Running install phase")
        deps = []
        if self.args.proxy_url:
            deps.extend(self.proxy_deps)
            self.install_git_proxy()
        if self.backend.supports_snapd:
            # udev is installed explicitly to work around
            # https://bugs.launchpad.net/snapd/+bug/1731519.
            for dep in "snapd", "fuse", "squashfuse", "udev":
                if self.backend.is_package_available(dep):
                    deps.append(dep)
        deps.extend(self.vcs_deps)
        # See charmcraft.provider.CharmcraftBuilddBaseConfiguration.setup.
        deps.extend(
            [
                "python3-pip",
                "python3-setuptools",
            ]
        )
        self.backend.run(["apt-get", "-y", "install"] + deps)
        if self.backend.supports_snapd:
            self.snap_store_set_proxy()
        for snap_name, channel in sorted(self.args.channels.items()):
            # charmcraft is handled separately, since it requires --classic.
            if snap_name != "charmcraft":
                self.backend.run(
                    ["snap", "install", "--channel=%s" % channel, snap_name]
                )
        if "charmcraft" in self.args.channels:
            self.backend.run(
                [
                    "snap",
                    "install",
                    "--classic",
                    "--channel=%s" % self.args.channels["charmcraft"],
                    "charmcraft",
                ]
            )
        else:
            self.backend.run(["snap", "install", "--classic", "charmcraft"])
        # The charmcraft snap can't see /build, so we have to do our work under
        # /home/buildd instead.  Make sure it exists.
        self.backend.run(["mkdir", "-p", "/home/buildd"])

    def repo(self):
        """Collect git or bzr branch."""
        logger.info("Running repo phase...")
        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
        self.vcs_fetch(self.args.name, cwd="/home/buildd", env=env)
        self.vcs_update_status(self.buildd_path)

    def build(self):
        """Run `charmcraft pack` in the configured build directory."""
        logger.info("Running build phase...")
        build_context_path = os.path.join(
            "/home/buildd", self.args.name, self.args.build_path
        )
        # Refuse --build-path values that point outside the build tree.
        check_path_escape(self.buildd_path, build_context_path)
        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
        args = ["charmcraft", "pack", "-v", "--destructive-mode"]
        self.run_build_command(args, env=env, cwd=build_context_path)

    def run(self):
        """Run all phases; return 0 on success, or RETCODE_FAILURE_INSTALL /
        RETCODE_FAILURE_BUILD on failure of the respective phase."""
        try:
            self.install()
        except Exception:
            logger.exception("Install failed")
            return RETCODE_FAILURE_INSTALL
        try:
            self.repo()
            self.build()
        except Exception:
            logger.exception("Build failed")
            return RETCODE_FAILURE_BUILD
        return 0
diff --git a/lpbuildd/target/build_livefs.py b/lpbuildd/target/build_livefs.py
new file mode 100644
index 0000000..d77e1cf
--- /dev/null
+++ b/lpbuildd/target/build_livefs.py
@@ -0,0 +1,215 @@
+# Copyright 2013-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import os
+from collections import OrderedDict
+
+from lpbuildd.target.operation import Operation
+from lpbuildd.target.snapstore import SnapStoreOperationMixin
+
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+logger = logging.getLogger(__name__)
+
+
def get_build_path(build_id, *extra):
    """Generate a path within the build directory.

    :param build_id: the build id to use.
    :param extra: the extra path segments within the build directory.
    :return: the generated path.
    """
    home_dir = os.environ["HOME"]
    return os.path.join(home_dir, "build-" + build_id, *extra)
+
+
class BuildLiveFS(SnapStoreOperationMixin, Operation):
    """Build a live file system using livecd-rootfs/live-build (or
    ubuntu-defaults-image when a locale is requested)."""

    description = "Build a live file system."

    @classmethod
    def add_arguments(cls, parser):
        super().add_arguments(parser)
        parser.add_argument(
            "--subarch",
            metavar="SUBARCH",
            help="build for subarchitecture SUBARCH",
        )
        parser.add_argument(
            "--project", metavar="PROJECT", help="build for project PROJECT"
        )
        parser.add_argument(
            "--subproject",
            metavar="SUBPROJECT",
            help="build for subproject SUBPROJECT",
        )
        parser.add_argument("--datestamp", help="date stamp")
        parser.add_argument(
            "--image-format",
            metavar="FORMAT",
            help="produce an image in FORMAT",
        )
        parser.add_argument(
            "--image-target",
            dest="image_targets",
            default=[],
            action="append",
            metavar="TARGET",
            help="produce image for TARGET",
        )
        parser.add_argument(
            "--repo-snapshot-stamp",
            dest="repo_snapshot_stamp",
            metavar="TIMESTAMP",
            help="build against package repo state at TIMESTAMP",
        )
        parser.add_argument(
            "--cohort-key",
            dest="cohort_key",
            metavar="COHORT_KEY",
            help="use COHORT_KEY during snap downloads",
        )
        parser.add_argument(
            "--proposed",
            default=False,
            action="store_true",
            help="enable use of -proposed pocket",
        )
        parser.add_argument(
            "--locale",
            metavar="LOCALE",
            help="use ubuntu-defaults-image to build an image for LOCALE",
        )
        parser.add_argument(
            "--extra-ppa",
            dest="extra_ppas",
            default=[],
            action="append",
            help="use this additional PPA",
        )
        parser.add_argument(
            "--extra-snap",
            dest="extra_snaps",
            default=[],
            action="append",
            help="use this additional snap",
        )
        parser.add_argument(
            "--channel",
            metavar="CHANNEL",
            help="pull snaps from channel CHANNEL for ubuntu-core image",
        )
        parser.add_argument(
            "--http-proxy", action="store", help="use this HTTP proxy for apt"
        )
        parser.add_argument(
            "--debug",
            default=False,
            action="store_true",
            help="enable detailed live-build debugging",
        )

    def install(self):
        """Install livecd-rootfs plus optional snap/locale tooling."""
        deps = ["livecd-rootfs"]
        if self.backend.supports_snapd:
            # udev is installed explicitly to work around
            # https://bugs.launchpad.net/snapd/+bug/1731519.
            for dep in "snapd", "fuse", "squashfuse", "udev":
                if self.backend.is_package_available(dep):
                    deps.append(dep)
        self.backend.run(["apt-get", "-y", "install"] + deps)
        if self.backend.supports_snapd:
            self.snap_store_set_proxy()
        if self.args.locale is not None:
            self.backend.run(
                [
                    "apt-get",
                    "-y",
                    "--install-recommends",
                    "install",
                    "ubuntu-defaults-builder",
                ]
            )

    def build(self):
        """Run the image build.

        With --locale, delegates entirely to ubuntu-defaults-image;
        otherwise drives live-build via the livecd-rootfs auto scripts,
        passing configuration through environment variables.
        """
        if self.args.locale is not None:
            self.run_build_command(
                [
                    "ubuntu-defaults-image",
                    "--locale",
                    self.args.locale,
                    "--arch",
                    self.args.arch,
                    "--release",
                    self.args.series,
                ]
            )
        else:
            self.run_build_command(["rm", "-rf", "auto", "local"])
            self.run_build_command(["mkdir", "-p", "auto"])
            # Link livecd-rootfs's canned auto scripts into place so that
            # `lb config`/`lb build`/`lb clean` use them.
            for lb_script in ("config", "build", "clean"):
                lb_script_path = os.path.join(
                    "/usr/share/livecd-rootfs/live-build/auto", lb_script
                )
                self.run_build_command(["ln", "-s", lb_script_path, "auto/"])
            if self.args.debug:
                # live-build sources local/functions/*.sh; `set -x` traces
                # every shell command for debugging.
                self.run_build_command(["mkdir", "-p", "local/functions"])
                self.run_build_command(
                    ["sh", "-c", "echo 'set -x' >local/functions/debug.sh"]
                )
            self.run_build_command(["lb", "clean", "--purge"])

            # base_lb_env holds variables common to both `lb config` and
            # `lb build`; lb_env adds the config-only ones.
            base_lb_env = OrderedDict()
            base_lb_env["PROJECT"] = self.args.project
            base_lb_env["ARCH"] = self.args.arch
            if self.args.subproject is not None:
                base_lb_env["SUBPROJECT"] = self.args.subproject
            if self.args.subarch is not None:
                base_lb_env["SUBARCH"] = self.args.subarch
            if self.args.channel is not None:
                base_lb_env["CHANNEL"] = self.args.channel
            if self.args.image_targets:
                base_lb_env["IMAGE_TARGETS"] = " ".join(
                    self.args.image_targets
                )
            if self.args.repo_snapshot_stamp:
                base_lb_env["REPO_SNAPSHOT_STAMP"] = (
                    self.args.repo_snapshot_stamp
                )
            if self.args.cohort_key:
                base_lb_env["COHORT_KEY"] = self.args.cohort_key
            lb_env = base_lb_env.copy()
            lb_env["SUITE"] = self.args.series
            if self.args.datestamp is not None:
                lb_env["NOW"] = self.args.datestamp
            if self.args.image_format is not None:
                lb_env["IMAGEFORMAT"] = self.args.image_format
            if self.args.proposed:
                lb_env["PROPOSED"] = "1"
            if self.args.extra_ppas:
                lb_env["EXTRA_PPAS"] = " ".join(self.args.extra_ppas)
            if self.args.extra_snaps:
                lb_env["EXTRA_SNAPS"] = " ".join(self.args.extra_snaps)
            if self.args.http_proxy:
                proxy_dict = {
                    "http_proxy": self.args.http_proxy,
                    "LB_APT_HTTP_PROXY": self.args.http_proxy,
                }
                lb_env.update(proxy_dict)
                base_lb_env.update(proxy_dict)
            self.run_build_command(["lb", "config"], env=lb_env)
            self.run_build_command(["lb", "build"], env=base_lb_env)

    def run(self):
        """Run install then build; return 0 on success, or
        RETCODE_FAILURE_INSTALL / RETCODE_FAILURE_BUILD on failure."""
        try:
            self.install()
        except Exception:
            logger.exception("Install failed")
            return RETCODE_FAILURE_INSTALL
        try:
            self.build()
        except Exception:
            logger.exception("Build failed")
            return RETCODE_FAILURE_BUILD
        return 0
diff --git a/lpbuildd/target/build_oci.py b/lpbuildd/target/build_oci.py
new file mode 100644
index 0000000..1a52854
--- /dev/null
+++ b/lpbuildd/target/build_oci.py
@@ -0,0 +1,136 @@
+# Copyright 2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import os.path
+from textwrap import dedent
+
+from lpbuildd.target.backend import check_path_escape
+from lpbuildd.target.operation import Operation
+from lpbuildd.target.proxy import BuilderProxyOperationMixin
+from lpbuildd.target.snapstore import SnapStoreOperationMixin
+from lpbuildd.target.vcs import VCSOperationMixin
+
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+logger = logging.getLogger(__name__)
+
+
class BuildOCI(
    BuilderProxyOperationMixin,
    VCSOperationMixin,
    SnapStoreOperationMixin,
    Operation,
):
    """Build an OCI image from a git or bzr branch using `docker build`."""

    description = "Build an OCI image."

    @classmethod
    def add_arguments(cls, parser):
        super().add_arguments(parser)
        parser.add_argument(
            "--build-file", help="path to Dockerfile in branch"
        )
        parser.add_argument(
            "--build-path",
            default=".",
            help="context directory for docker build",
        )
        parser.add_argument(
            "--build-arg",
            default=[],
            action="append",
            help="A docker build ARG in the format of key=value. "
            "This option can be repeated many times. For example: "
            "--build-arg VAR1=A --build-arg VAR2=B",
        )
        parser.add_argument("name", help="name of image to build")

    def __init__(self, args, parser):
        super().__init__(args, parser)
        # The docker snap cannot see /build (see install()), so all work
        # happens under /home/buildd.
        self.buildd_path = os.path.join("/home/buildd", self.args.name)

    def _add_docker_engine_proxy_settings(self):
        """Add systemd file for docker proxy settings."""
        # Create containing directory for systemd overrides
        self.backend.run(
            ["mkdir", "-p", "/etc/systemd/system/docker.service.d"]
        )
        # we need both http_proxy and https_proxy. The contents of the files
        # are otherwise identical
        for setting in ["http_proxy", "https_proxy"]:
            contents = dedent(
                f"""[Service]
                Environment="{setting.upper()}={self.args.proxy_url}"
                """
            )
            file_path = f"/etc/systemd/system/docker.service.d/{setting}.conf"
            with self.backend.open(file_path, mode="w+") as systemd_file:
                systemd_file.write(contents)

    def install(self):
        """Install VCS tools, docker, and any proxy configuration."""
        logger.info("Running install phase...")
        deps = []
        if self.args.proxy_url:
            deps.extend(self.proxy_deps)
            self.install_git_proxy()
            # Add any proxy settings that are needed
            self._add_docker_engine_proxy_settings()
        deps.extend(self.vcs_deps)
        deps.extend(["docker.io"])
        self.backend.run(["apt-get", "-y", "install"] + deps)
        if self.backend.supports_snapd:
            self.snap_store_set_proxy()
        # Restart so docker picks up the proxy drop-ins written above.
        self.backend.run(["systemctl", "restart", "docker"])
        # The docker snap can't see /build, so we have to do our work under
        # /home/buildd instead.  Make sure it exists.
        self.backend.run(["mkdir", "-p", "/home/buildd"])

    def repo(self):
        """Collect git or bzr branch."""
        logger.info("Running repo phase...")
        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
        self.vcs_fetch(
            self.args.name, cwd="/home/buildd", env=env, git_shallow_clone=True
        )

    def build(self):
        """Assemble and run the `docker build` command line."""
        logger.info("Running build phase...")
        args = ["docker", "build", "--no-cache"]
        if self.args.proxy_url:
            for var in ("http_proxy", "https_proxy"):
                args.extend(["--build-arg", f"{var}={self.args.proxy_url}"])
        args.extend(["--tag", self.args.name])
        if self.args.build_file is not None:
            build_file_path = os.path.join(
                self.args.build_path, self.args.build_file
            )
            # Refuse --build-file values that point outside the build tree.
            check_path_escape(self.buildd_path, build_file_path)
            args.extend(["--file", build_file_path])

        # Keep this at the end, so we give the user a chance to override any
        # build-arg we set automatically (like http_proxy).
        for arg in self.args.build_arg:
            args.extend(["--build-arg=%s" % arg])

        build_context_path = os.path.join(
            self.buildd_path, self.args.build_path
        )
        # Refuse --build-path values that point outside the build tree.
        check_path_escape(self.buildd_path, build_context_path)
        args.append(build_context_path)
        self.run_build_command(args)

    def run(self):
        """Run all phases; return 0 on success, or RETCODE_FAILURE_INSTALL /
        RETCODE_FAILURE_BUILD on failure of the respective phase."""
        try:
            self.install()
        except Exception:
            logger.exception("Install failed")
            return RETCODE_FAILURE_INSTALL
        try:
            self.repo()
            self.build()
        except Exception:
            logger.exception("Build failed")
            return RETCODE_FAILURE_BUILD
        return 0
diff --git a/lpbuildd/target/build_snap.py b/lpbuildd/target/build_snap.py
new file mode 100644
index 0000000..18af73a
--- /dev/null
+++ b/lpbuildd/target/build_snap.py
@@ -0,0 +1,328 @@
+# Copyright 2015-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import argparse
+import base64
+import json
+import logging
+import os.path
+from textwrap import dedent
+from urllib.parse import urlparse
+
+from lpbuildd.target.operation import Operation
+from lpbuildd.target.proxy import BuilderProxyOperationMixin
+from lpbuildd.target.snapstore import SnapStoreOperationMixin
+from lpbuildd.target.vcs import VCSOperationMixin
+from lpbuildd.util import RevokeProxyTokenError, revoke_proxy_token
+
+# Distinct exit codes so the caller can tell install-phase failures
+# from build-phase failures (see BuildSnap.run below).
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+logger = logging.getLogger(__name__)
+
+
+class SnapChannelsAction(argparse.Action):
+    """argparse action parsing repeated 'snap=channel' options into a dict."""
+
+    def __init__(self, option_strings, dest, nargs=None, **kwargs):
+        # Each occurrence consumes exactly one value; nargs makes no sense.
+        if nargs is not None:
+            raise ValueError("nargs not allowed")
+        super().__init__(option_strings, dest, **kwargs)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        if "=" not in values:
+            raise argparse.ArgumentError(
+                self, f"'{values}' is not of the form 'snap=channel'"
+            )
+        # Split on the first '=' only, so channel names may contain '='.
+        snap, channel = values.split("=", 1)
+        # Accumulate repeated options into one {snap: channel} mapping.
+        if getattr(namespace, self.dest, None) is None:
+            setattr(namespace, self.dest, {})
+        getattr(namespace, self.dest)[snap] = channel
+
+
+class BuildSnap(
+    BuilderProxyOperationMixin,
+    VCSOperationMixin,
+    SnapStoreOperationMixin,
+    Operation,
+):
+    """Build a snap from a VCS checkout by driving snapcraft's phases."""
+
+    description = "Build a snap."
+
+    @classmethod
+    def add_arguments(cls, parser):
+        super().add_arguments(parser)
+        parser.add_argument(
+            "--channel",
+            action=SnapChannelsAction,
+            metavar="SNAP=CHANNEL",
+            dest="channels",
+            default={},
+            help="install SNAP from CHANNEL",
+        )
+        parser.add_argument(
+            "--build-request-id",
+            help="ID of the request triggering this build on Launchpad",
+        )
+        parser.add_argument(
+            "--build-request-timestamp",
+            help="RFC3339 timestamp of the Launchpad build request",
+        )
+        parser.add_argument(
+            "--build-url", help="URL of this build on Launchpad"
+        )
+        parser.add_argument(
+            "--build-source-tarball",
+            default=False,
+            action="store_true",
+            help=(
+                "build a tarball containing all source code, including "
+                "external dependencies"
+            ),
+        )
+        parser.add_argument(
+            "--private",
+            default=False,
+            action="store_true",
+            help="build a private snap",
+        )
+        parser.add_argument(
+            "--target-arch",
+            dest="target_architectures",
+            action="append",
+            help="build for the specified architectures",
+        )
+        parser.add_argument(
+            "--upstream-proxy-url",
+            help=(
+                "URL of the builder proxy upstream of the one run internally "
+                "by launchpad-buildd"
+            ),
+        )
+        parser.add_argument(
+            "--disable-proxy-after-pull",
+            default=False,
+            action="store_true",
+            help="disable proxy access after the pull phase has finished",
+        )
+        # NOTE(review): the underscores here are inconsistent with the other
+        # kebab-case option names; confirm against callers before renaming,
+        # since they must pass the flag spelled exactly like this.
+        parser.add_argument(
+            "--use_fetch_service",
+            default=False,
+            action="store_true",
+            help="use the fetch service instead of the builder proxy",
+        )
+        parser.add_argument(
+            "--fetch-service-mitm-certificate",
+            type=str,
+            help="content of the ca certificate",
+        )
+        parser.add_argument("name", help="name of snap to build")
+
+    def install_snapd_proxy(self, proxy_url):
+        """Install snapd proxy
+
+        This is necessary so the proxy can communicate properly
+        with snapcraft.
+        """
+        if proxy_url:
+            self.backend.run(
+                ["snap", "set", "system", f"proxy.http={proxy_url}"]
+            )
+            self.backend.run(
+                ["snap", "set", "system", f"proxy.https={proxy_url}"]
+            )
+
+    def install_mitm_certificate(self):
+        """Install ca certificate for the fetch service
+
+        This is necessary so the fetch service can man-in-the-middle all
+        requests when fetching dependencies.
+        """
+        with self.backend.open(
+            "/usr/local/share/ca-certificates/local-ca.crt", mode="wb"
+        ) as local_ca_cert:
+            # Certificate is passed as a Base64 encoded string.
+            # It's encoded using `base64 -w0` on the cert file.
+            decoded_certificate = base64.b64decode(
+                self.args.fetch_service_mitm_certificate.encode("ASCII")
+            )
+            local_ca_cert.write(decoded_certificate)
+            # World-readable so non-root processes can validate TLS.
+            os.fchmod(local_ca_cert.fileno(), 0o644)
+        self.backend.run(["update-ca-certificates"])
+
+    def restart_snapd(self):
+        # This is required to pick up the certificate
+        self.backend.run(["systemctl", "restart", "snapd"])
+
+    def install_svn_servers(self):
+        """Write a Subversion servers file routing HTTP through the proxy."""
+        proxy = urlparse(self.args.proxy_url)
+        svn_servers = dedent(
+            f"""\
+            [global]
+            http-proxy-host = {proxy.hostname}
+            http-proxy-port = {proxy.port}
+            """
+        )
+        # We should never end up with an authenticated proxy here since
+        # lpbuildd.snap deals with it, but it's almost as easy to just
+        # handle it as to assert that we don't need to.
+        if proxy.username:
+            svn_servers += f"http-proxy-username = {proxy.username}\n"
+        if proxy.password:
+            svn_servers += f"http-proxy-password = {proxy.password}\n"
+        self.backend.run(["mkdir", "-p", "/root/.subversion"])
+        with self.backend.open(
+            "/root/.subversion/servers", mode="w+"
+        ) as svn_servers_file:
+            svn_servers_file.write(svn_servers)
+            os.fchmod(svn_servers_file.fileno(), 0o644)
+
+    def install(self):
+        """Install snapcraft and every dependency needed by later phases."""
+        logger.info("Running install phase...")
+        deps = []
+        if self.args.proxy_url:
+            deps.extend(self.proxy_deps)
+            self.install_git_proxy()
+        if self.backend.supports_snapd:
+            # udev is installed explicitly to work around
+            # https://bugs.launchpad.net/snapd/+bug/1731519.
+            for dep in "snapd", "fuse", "squashfuse", "udev":
+                if self.backend.is_package_available(dep):
+                    deps.append(dep)
+        deps.extend(self.vcs_deps)
+        if "snapcraft" in self.args.channels:
+            # snapcraft requires sudo in lots of places, but can't depend on
+            # it when installed as a snap.
+            deps.append("sudo")
+        else:
+            deps.append("snapcraft")
+        self.backend.run(["apt-get", "-y", "install"] + deps)
+        if self.backend.supports_snapd:
+            self.snap_store_set_proxy()
+        for snap_name, channel in sorted(self.args.channels.items()):
+            # snapcraft is handled separately, since it requires --classic.
+            if snap_name != "snapcraft":
+                self.backend.run(
+                    ["snap", "install", "--channel=%s" % channel, snap_name]
+                )
+                # If a given snap is pre-installed on the host image,
+                # refresh is required instead to change channel to the
+                # desired one.
+                self.backend.run(
+                    ["snap", "refresh", "--channel=%s" % channel, snap_name]
+                )
+        if "snapcraft" in self.args.channels:
+            self.backend.run(
+                [
+                    "snap",
+                    "install",
+                    "--classic",
+                    "--channel=%s" % self.args.channels["snapcraft"],
+                    "snapcraft",
+                ]
+            )
+        if self.args.proxy_url:
+            # XXX jugmac00 2024-04-17: this is configuring an SVN server;
+            # it is currently unclear whether this is still necessary for
+            # building snaps.  jugmac00 reached out to William and Claudio
+            # to figure out whether it can be removed.
+            self.install_svn_servers()
+        if self.args.use_fetch_service:
+            self.install_mitm_certificate()
+            self.install_snapd_proxy(proxy_url=self.args.proxy_url)
+            self.restart_snapd()
+
+    def repo(self):
+        """Collect git or bzr branch."""
+        logger.info("Running repo phase...")
+        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
+        self.vcs_fetch(self.args.name, cwd="/build", env=env)
+        self.vcs_update_status(os.path.join("/build", self.args.name))
+
+    @property
+    def image_info(self):
+        """Return JSON build-identification metadata for snapcraft."""
+        data = {}
+        if self.args.build_request_id is not None:
+            data["build-request-id"] = f"lp-{self.args.build_request_id}"
+        if self.args.build_request_timestamp is not None:
+            data["build-request-timestamp"] = self.args.build_request_timestamp
+        if self.args.build_url is not None:
+            # NOTE(review): this key uses an underscore while the two keys
+            # above use hyphens — confirm consumers expect this spelling.
+            data["build_url"] = self.args.build_url
+        return json.dumps(data, sort_keys=True)
+
+    def pull(self):
+        """Run pull phase."""
+        logger.info("Running pull phase...")
+        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
+        env["SNAPCRAFT_LOCAL_SOURCES"] = "1"
+        env["SNAPCRAFT_SETUP_CORE"] = "1"
+        if not self.args.private:
+            env["SNAPCRAFT_BUILD_INFO"] = "1"
+        env["SNAPCRAFT_IMAGE_INFO"] = self.image_info
+        env["SNAPCRAFT_BUILD_ENVIRONMENT"] = "host"
+        self.run_build_command(
+            ["snapcraft", "pull"],
+            cwd=os.path.join("/build", self.args.name),
+            env=env,
+        )
+        if self.args.build_source_tarball:
+            # Fixed sort order, numeric 0:0 ownership and GNU format keep
+            # the source tarball reproducible.
+            self.run_build_command(
+                [
+                    "tar",
+                    "-czf",
+                    "%s.tar.gz" % self.args.name,
+                    "--format=gnu",
+                    "--sort=name",
+                    "--exclude-vcs",
+                    "--numeric-owner",
+                    "--owner=0",
+                    "--group=0",
+                    self.args.name,
+                ],
+                cwd="/build",
+            )
+        if (
+            self.args.disable_proxy_after_pull
+            and self.args.upstream_proxy_url
+            and self.args.revocation_endpoint
+        ):
+            logger.info("Revoking proxy token...")
+            try:
+                revoke_proxy_token(
+                    self.args.upstream_proxy_url,
+                    self.args.revocation_endpoint,
+                    self.args.use_fetch_service,
+                )
+            except RevokeProxyTokenError as e:
+                # Best-effort: a failed revocation is logged, not fatal.
+                logger.info(str(e))
+
+    def build(self):
+        """Run all build, stage and snap phases."""
+        logger.info("Running build phase...")
+        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
+        if not self.args.private:
+            env["SNAPCRAFT_BUILD_INFO"] = "1"
+        env["SNAPCRAFT_IMAGE_INFO"] = self.image_info
+        env["SNAPCRAFT_BUILD_ENVIRONMENT"] = "host"
+        if self.args.target_architectures:
+            env["SNAPCRAFT_BUILD_FOR"] = self.args.target_architectures[0]
+        output_path = os.path.join("/build", self.args.name)
+        self.run_build_command(["snapcraft"], cwd=output_path, env=env)
+        # Checksum the built .snap files, skipping symlinks.
+        for entry in sorted(self.backend.listdir(output_path)):
+            if self.backend.islink(os.path.join(output_path, entry)):
+                continue
+            if entry.endswith(".snap"):
+                self.run_build_command(["sha512sum", entry], cwd=output_path)
+
+    def run(self):
+        """Run all phases, mapping failures to distinct return codes."""
+        try:
+            self.install()
+        except Exception:
+            logger.exception("Install failed")
+            return RETCODE_FAILURE_INSTALL
+        try:
+            self.repo()
+            self.pull()
+            self.build()
+        except Exception:
+            logger.exception("Build failed")
+            return RETCODE_FAILURE_BUILD
+        return 0
diff --git a/lpbuildd/target/chroot.py b/lpbuildd/target/chroot.py
new file mode 100644
index 0000000..d690ec1
--- /dev/null
+++ b/lpbuildd/target/chroot.py
@@ -0,0 +1,192 @@
+# Copyright 2009-2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os.path
+import signal
+import stat
+import subprocess
+import time
+
+from lpbuildd.target.backend import Backend, BackendException
+from lpbuildd.util import set_personality, shell_escape
+
+
+class Chroot(Backend):
+    """Sets up a chroot."""
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # Everything in the target lives under <build_path>/chroot-autobuild.
+        self.chroot_path = os.path.join(self.build_path, "chroot-autobuild")
+
+    def create(self, image_path, image_type):
+        """See `Backend`."""
+        if image_type == "chroot":
+            subprocess.check_call(
+                ["sudo", "tar", "-C", self.build_path, "-xf", image_path]
+            )
+        else:
+            raise ValueError("Unhandled image type: %s" % image_type)
+
+    def start(self):
+        """See `Backend`."""
+        # (fstype, mount options, source, mount point relative to chroot)
+        mounts = (
+            ("proc", None, "none", "proc"),
+            ("devpts", "gid=5,mode=620", "none", "dev/pts"),
+            ("sysfs", None, "none", "sys"),
+            ("tmpfs", None, "none", "dev/shm"),
+        )
+        for mount in mounts:
+            cmd = ["sudo", "mount", "-t", mount[0]]
+            if mount[1]:
+                cmd.extend(["-o", mount[1]])
+            cmd.append(mount[2])
+            cmd.append(os.path.join(self.chroot_path, mount[3]))
+            subprocess.check_call(cmd)
+
+        # Give the chroot the host's name-resolution configuration.
+        for path in ("/etc/hosts", "/etc/hostname", "/etc/resolv.conf"):
+            self.copy_in(path, path)
+
+    def run(
+        self,
+        args,
+        cwd=None,
+        env=None,
+        input_text=None,
+        get_output=False,
+        echo=False,
+        **kwargs,
+    ):
+        """See `Backend`."""
+        if env:
+            args = (
+                ["env"]
+                + [f"{key}={value}" for key, value in env.items()]
+                + args
+            )
+        if self.arch is not None:
+            args = set_personality(args, self.arch, series=self.series)
+        if cwd is not None:
+            # This requires either a helper program in the chroot or
+            # unpleasant quoting.  For now we go for the unpleasant quoting,
+            # though once we have coreutils >= 8.28 everywhere we'll be able
+            # to use "env --chdir".
+            escaped_args = " ".join(shell_escape(arg) for arg in args)
+            args = [
+                "/bin/sh",
+                "-c",
+                f"cd {shell_escape(cwd)} && {escaped_args}",
+            ]
+        if echo:
+            print(
+                "Running in chroot: %s"
+                % " ".join(shell_escape(arg) for arg in args)
+            )
+        cmd = ["sudo", "/usr/sbin/chroot", self.chroot_path] + args
+        if input_text is None and not get_output:
+            subprocess.check_call(cmd, **kwargs)
+        else:
+            if get_output:
+                kwargs["stdout"] = subprocess.PIPE
+            proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, **kwargs)
+            output, _ = proc.communicate(input_text)
+            if proc.returncode:
+                raise subprocess.CalledProcessError(proc.returncode, cmd)
+            if get_output:
+                if echo:
+                    print("Output:")
+                    output_text = output
+                    if isinstance(output_text, bytes):
+                        output_text = output_text.decode("UTF-8", "replace")
+                    print(output_text)
+                return output
+
+    def copy_in(self, source_path, target_path):
+        """See `Backend`."""
+        # Use install(1) so that we can end up with root/root ownership with
+        # a minimum of subprocess calls; the buildd user may not make sense
+        # in the target.
+        mode = stat.S_IMODE(os.stat(source_path).st_mode)
+        full_target_path = os.path.join(
+            self.chroot_path, target_path.lstrip("/")
+        )
+        subprocess.check_call(
+            [
+                "sudo",
+                "install",
+                "-o",
+                "root",
+                "-g",
+                "root",
+                "-m",
+                "%o" % mode,
+                source_path,
+                full_target_path,
+            ]
+        )
+
+    def copy_out(self, source_path, target_path):
+        """See `Backend`."""
+        # Don't use install(1) here because running `os.stat` to get file mode
+        # may be impossible. Instead, copy the file with `cp` and set its
+        # ownership to buildd (this is necessary so that buildd can
+        # read/write the copied file).
+        full_source_path = os.path.join(
+            self.chroot_path, source_path.lstrip("/")
+        )
+        subprocess.check_call(
+            [
+                "sudo",
+                "cp",
+                "--preserve=timestamps",
+                full_source_path,
+                target_path,
+            ]
+        )
+        uid, gid = os.getuid(), os.getgid()
+        subprocess.check_call(["sudo", "chown", f"{uid}:{gid}", target_path])
+
+    def kill_processes(self):
+        """See `Backend`."""
+        prefix = os.path.realpath(self.chroot_path)
+        # Repeat until a pass kills nothing, since killed processes may
+        # have spawned children in the meantime.
+        while True:
+            found = False
+            pids = [int(pid) for pid in os.listdir("/proc") if pid.isdigit()]
+            for pid in sorted(pids):
+                try:
+                    link = os.readlink(os.path.join("/proc", str(pid), "root"))
+                except OSError:
+                    continue
+                if link and (link == prefix or link.startswith(prefix + "/")):
+                    try:
+                        os.kill(pid, signal.SIGKILL)
+                    except OSError:
+                        # Process may already have exited; ignore.
+                        pass
+                    found = True
+            if not found:
+                break
+
+    def _get_chroot_mounts(self):
+        # Yield mount points currently under the chroot, per /proc/mounts.
+        with open("/proc/mounts") as mounts_file:
+            for line in mounts_file:
+                mount_path = line.split()[1]
+                if mount_path.startswith(self.chroot_path):
+                    yield mount_path
+
+    def stop(self):
+        """See `Backend`."""
+        for _ in range(20):
+            # Reverse the list, since we must unmount subdirectories before
+            # parent directories.
+            mounts = reversed(list(self._get_chroot_mounts()))
+            # NOTE(review): `reversed` returns an iterator, which is always
+            # truthy, so this break can never fire; termination instead
+            # relies on the for/else re-check below.  Consider
+            # `list(reversed(...))` so the early exit works as intended.
+            if not mounts:
+                break
+            retcodes = [
+                subprocess.call(["sudo", "umount", mount]) for mount in mounts
+            ]
+            if any(retcodes):
+                time.sleep(1)
+        else:
+            if list(self._get_chroot_mounts()):
+                # Show who is holding the mounts before giving up.
+                subprocess.check_call(["lsof", self.chroot_path])
+                raise BackendException(
+                    "Failed to unmount %s" % self.chroot_path
+                )
diff --git a/lpbuildd/target/cli.py b/lpbuildd/target/cli.py
new file mode 100644
index 0000000..670cf28
--- /dev/null
+++ b/lpbuildd/target/cli.py
@@ -0,0 +1,75 @@
+# Copyright 2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import sys
+from argparse import ArgumentParser
+
+from lpbuildd.target.apt import AddTrustedKeys, OverrideSourcesList, Update
+from lpbuildd.target.build_charm import BuildCharm
+from lpbuildd.target.build_livefs import BuildLiveFS
+from lpbuildd.target.build_oci import BuildOCI
+from lpbuildd.target.build_snap import BuildSnap
+from lpbuildd.target.generate_translation_templates import (
+    GenerateTranslationTemplates,
+)
+from lpbuildd.target.lifecycle import (
+    Create,
+    KillProcesses,
+    Remove,
+    Start,
+    Stop,
+)
+from lpbuildd.target.run_ci import RunCI, RunCIPrepare
+
+
+def configure_logging():
+    """Route records below ERROR to stdout and ERROR and above to stderr."""
+
+    class StdoutFilter(logging.Filter):
+        def filter(self, record):
+            return record.levelno < logging.ERROR
+
+    class StderrFilter(logging.Filter):
+        def filter(self, record):
+            return record.levelno >= logging.ERROR
+
+    logger = logging.getLogger()
+    stdout_handler = logging.StreamHandler(stream=sys.stdout)
+    stdout_handler.addFilter(StdoutFilter())
+    stderr_handler = logging.StreamHandler(stream=sys.stderr)
+    stderr_handler.addFilter(StderrFilter())
+    for handler in (stdout_handler, stderr_handler):
+        logger.addHandler(handler)
+    logger.setLevel(logging.INFO)
+
+
+# Map operation names, as given on the command line, to the Operation
+# subclasses implementing them.
+operations = {
+    "add-trusted-keys": AddTrustedKeys,
+    "build-oci": BuildOCI,
+    "build-charm": BuildCharm,
+    "buildlivefs": BuildLiveFS,
+    "buildsnap": BuildSnap,
+    "generate-translation-templates": GenerateTranslationTemplates,
+    "override-sources-list": OverrideSourcesList,
+    "mount-chroot": Start,
+    "remove-build": Remove,
+    "run-ci": RunCI,
+    "run-ci-prepare": RunCIPrepare,
+    "scan-for-processes": KillProcesses,
+    "umount-chroot": Stop,
+    "unpack-chroot": Create,
+    "update-debian-chroot": Update,
+}
+
+
+def parse_args(args=None):
+    """Parse the command line and instantiate the selected operation.
+
+    Returns the parsed namespace with `operation` set to an instance of
+    the chosen Operation subclass.
+    """
+    parser = ArgumentParser(description="Run an operation in the target.")
+    subparsers = parser.add_subparsers(metavar="OPERATION")
+    for name, factory in sorted(operations.items()):
+        subparser = subparsers.add_parser(
+            name, description=factory.description, help=factory.description
+        )
+        factory.add_arguments(subparser)
+        subparser.set_defaults(operation_factory=factory)
+    args = parser.parse_args(args=args)
+    # NOTE(review): if no OPERATION is given, operation_factory is unset and
+    # this raises AttributeError — confirm a usage error isn't preferable.
+    args.operation = args.operation_factory(args, parser)
+    return args
diff --git a/lpbuildd/target/generate_translation_templates.py b/lpbuildd/target/generate_translation_templates.py
new file mode 100644
index 0000000..2f8874d
--- /dev/null
+++ b/lpbuildd/target/generate_translation_templates.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import os.path
+
+from lpbuildd.pottery import intltool
+from lpbuildd.target.operation import Operation
+from lpbuildd.target.vcs import VCSOperationMixin
+
+logger = logging.getLogger(__name__)
+
+
+# Exit codes distinguishing install-phase from build-phase failures.
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+class GenerateTranslationTemplates(VCSOperationMixin, Operation):
+    """Script to generate translation templates from a branch."""
+
+    description = "Generate templates for a branch."
+
+    @classmethod
+    def add_arguments(cls, parser):
+        super().add_arguments(parser)
+        parser.add_argument(
+            "result_name",
+            help="the name of the result tarball; should end in '.tar.gz'",
+        )
+
+    def __init__(self, args, parser):
+        super().__init__(args, parser)
+        # Work in $HOME; the branch is checked out into 'source-tree'.
+        self.work_dir = os.environ["HOME"]
+        self.branch_dir = os.path.join(self.work_dir, "source-tree")
+
+    def install(self):
+        """Install intltool plus whatever the VCS backend needs."""
+        logger.info("Installing dependencies...")
+        deps = ["intltool"]
+        deps.extend(self.vcs_deps)
+        self.backend.run(["apt-get", "-y", "install"] + deps)
+
+    def fetch(self, quiet=False):
+        """Check the branch out into self.branch_dir."""
+        logger.info("Fetching %s...", self.vcs_description)
+        self.vcs_fetch(
+            os.path.basename(self.branch_dir), cwd=self.work_dir, quiet=quiet
+        )
+
+    def _makeTarball(self, files):
+        """Put the given files into a tarball in the working directory."""
+        tarname = os.path.join(self.work_dir, self.args.result_name)
+        logger.info("Making tarball with templates in %s..." % tarname)
+        cmd = ["tar", "-C", self.branch_dir, "-czf", tarname]
+        # Skip directory entries; tar adds their contents via the files.
+        files = [name for name in files if not name.endswith("/")]
+        for path in files:
+            full_path = os.path.join(self.branch_dir, path)
+            logger.info("Adding template %s..." % full_path)
+            cmd.append(path)
+        self.backend.run(cmd)
+        logger.info("Tarball generated.")
+
+    def generate(self):
+        """Generate POT templates and tar them up if any were produced."""
+        logger.info("Generating templates...")
+        pots = intltool.generate_pots(self.backend, self.branch_dir)
+        logger.info("Generated %d templates." % len(pots))
+        if len(pots) > 0:
+            self._makeTarball(pots)
+
+    def run(self):
+        """Install, fetch and generate, mapping failures to return codes."""
+        try:
+            self.install()
+        except Exception:
+            logger.exception("Install failed")
+            return RETCODE_FAILURE_INSTALL
+        try:
+            self.fetch()
+            self.generate()
+        except Exception:
+            logger.exception("Build failed")
+            return RETCODE_FAILURE_BUILD
+        return 0
diff --git a/lpbuildd/target/lifecycle.py b/lpbuildd/target/lifecycle.py
new file mode 100644
index 0000000..38f2cef
--- /dev/null
+++ b/lpbuildd/target/lifecycle.py
@@ -0,0 +1,82 @@
+# Copyright 2009-2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import os
+import sys
+
+from lpbuildd.target.backend import BackendException
+from lpbuildd.target.operation import Operation
+
+logger = logging.getLogger(__name__)
+
+
+class Create(Operation):
+    """Unpack a base image into a fresh target environment."""
+
+    description = "Create the target environment."
+
+    @classmethod
+    def add_arguments(cls, parser):
+        super().add_arguments(parser)
+        parser.add_argument(
+            "--image-type", default="chroot", help="base image type"
+        )
+        parser.add_argument("image_path", help="path to base image")
+
+    def run(self):
+        logger.info("Creating target for build %s", self.args.build_id)
+        self.backend.create(self.args.image_path, self.args.image_type)
+        return 0
+
+
+class Start(Operation):
+    """Bring the backend's target environment up."""
+
+    description = "Start the target environment."
+
+    def run(self):
+        logger.info("Starting target for build %s", self.args.build_id)
+        self.backend.start()
+        return 0
+
+
+class KillProcesses(Operation):
+    """Kill every process still running inside the target."""
+
+    description = "Kill any processes in the target."
+
+    def run(self):
+        # This operation must run as root, since we want to iterate over
+        # other users' processes in Python.
+        if os.geteuid() != 0:
+            cmd = ["sudo"]
+            if "PYTHONPATH" in os.environ:
+                cmd.append("PYTHONPATH=%s" % os.environ["PYTHONPATH"])
+            cmd.append("--")
+            cmd.extend(sys.argv)
+            # Re-exec the whole command line under sudo; does not return.
+            os.execv("/usr/bin/sudo", cmd)
+        return self._run()
+
+    def _run(self):
+        logger.info(
+            "Scanning for processes to kill in build %s", self.args.build_id
+        )
+        self.backend.kill_processes()
+        return 0
+
+
+class Stop(Operation):
+    """Shut the target environment down, reporting failure via exit code."""
+
+    description = "Stop the target environment."
+
+    def run(self):
+        logger.info("Stopping target for build %s", self.args.build_id)
+        try:
+            self.backend.stop()
+        except BackendException:
+            logger.exception("Failed to stop target")
+            return 1
+        return 0
+
+
+class Remove(Operation):
+    """Delete the target environment entirely."""
+
+    description = "Remove the target environment."
+
+    def run(self):
+        logger.info("Removing build %s", self.args.build_id)
+        self.backend.remove()
+        return 0
diff --git a/lpbuildd/target/lxd.py b/lpbuildd/target/lxd.py
new file mode 100644
index 0000000..3d66765
--- /dev/null
+++ b/lpbuildd/target/lxd.py
@@ -0,0 +1,812 @@
+# Copyright 2017 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import io
+import json
+import os
+import re
+import stat
+import subprocess
+import tarfile
+import time
+from contextlib import closing
+from functools import cached_property
+from textwrap import dedent
+
+import netaddr
+import pylxd
+from pylxd.exceptions import LXDAPIException
+
+from lpbuildd.target.backend import Backend, BackendException
+from lpbuildd.util import set_personality, shell_escape
+
+LXD_RUNNING = 103
+
+
+def get_device_mapper_major():
+    """Return the major device number used by the devicemapper on this system.
+
+    This is not consistent across kernel versions, sadly.
+    """
+    with open("/proc/devices") as devices:
+        for line in devices:
+            if line.rstrip("\n").endswith(" device-mapper"):
+                return int(line.split()[0])
+        # NOTE(review): this for/else runs the raise whenever no line
+        # matched (the loop never breaks, it only returns); the else is
+        # redundant with a plain raise after the loop but harmless.
+        else:
+            raise Exception(
+                "Cannot determine major device number for device-mapper"
+            )
+
+
+# Minimal /etc/hosts content, written into the container when the image
+# provides an empty or missing /etc/hosts (see LXD.start).
+fallback_hosts = dedent(
+    """\
+    127.0.0.1\tlocalhost
+    ::1\tlocalhost ip6-localhost ip6-loopback
+    fe00::0\tip6-localnet
+    ff00::0\tip6-mcastprefix
+    ff02::1\tip6-allnodes
+    ff02::2\tip6-allrouters
+    """
+)
+
+
+# policy-rc.d script installed into the container to stop package
+# maintainer scripts from starting services during builds; udev and snapd
+# are allowed (exit 0), everything else is refused (exit 101).
+policy_rc_d = dedent(
+    """\
+    #! /bin/sh
+    while :; do
+        case "$1" in
+            -*) shift ;;
+            systemd-udevd|systemd-udevd.service|udev|udev.service)
+                exit 0 ;;
+            snapd|snapd.*)
+                exit 0 ;;
+            *)
+                echo "Not running services in chroot."
+                exit 101
+                ;;
+        esac
+    done
+    """
+)
+
+
+class LXDException(Exception):
+    """Wrap an LXDAPIException with some more useful information."""
+
+    def __init__(self, action, lxdapi_exc):
+        # action: human-readable description of what we were doing.
+        self.action = action
+        # lxdapi_exc: the underlying pylxd LXDAPIException.
+        self.lxdapi_exc = lxdapi_exc
+
+    def __str__(self):
+        return f"{self.action}: {self.lxdapi_exc}"
+
+
+class LXD(Backend):
+    supports_snapd = True
+
+    # Architecture mapping: Debian architecture name -> machine name as
+    # reported to LXD (used for image metadata).
+    arches = {
+        "amd64": "x86_64",
+        "arm64": "aarch64",
+        "armhf": "armv7l",
+        "i386": "i686",
+        "powerpc": "ppc",
+        "ppc64el": "ppc64le",
+        "riscv64": "riscv64",
+        "s390x": "s390x",
+    }
+
+    profile_name = "lpbuildd"
+    bridge_name = "lpbuilddbr0"
+    # XXX cjwatson 2017-08-07: Hardcoded for now to be in a range reserved
+    # for employee private networks in
+    # https://wiki.canonical.com/InformationInfrastructure/IS/Network, so it
+    # won't collide with any production networks.  This should be
+    # configurable.
+    ipv4_network = netaddr.IPNetwork("10.10.10.1/24")
+    run_dir = "/run/launchpad-buildd"
+
+    # Lazily-created pylxd client; access via the `client` property.
+    _client = None
+
+    @property
+    def client(self):
+        """A pylxd client, created on first use."""
+        if self._client is None:
+            self._client = pylxd.Client()
+        return self._client
+
+    @property
+    def lxc_arch(self):
+        """The LXC machine name for this backend's architecture."""
+        return self.arches[self.arch]
+
+    @property
+    def alias(self):
+        """The image alias for this series/architecture combination."""
+        return f"lp-{self.series}-{self.arch}"
+
+    @property
+    def name(self):
+        """The container name (same as the image alias)."""
+        return self.alias
+
+    def is_running(self):
+        """Return True if our container exists and is in the RUNNING state."""
+        try:
+            container = self.client.containers.get(self.name)
+            return container.status_code == LXD_RUNNING
+        except LXDAPIException:
+            # Treat any API error (e.g. no such container) as "not running".
+            return False
+
+    def _convert(self, source_tarball, target_tarball):
+        """Convert a chroot tarball into a combined LXD image tarball.
+
+        Writes a metadata.yaml member, then copies every member of
+        source_tarball across with the leading chroot-autobuild/ component
+        renamed to rootfs/ (the layout LXD expects).
+        """
+        creation_time = source_tarball.getmember("chroot-autobuild").mtime
+        metadata = {
+            "architecture": self.lxc_arch,
+            "creation_date": creation_time,
+            "properties": {
+                "os": "Ubuntu",
+                "series": self.series,
+                "architecture": self.arch,
+                "description": (
+                    f"Launchpad chroot for Ubuntu {self.series} ({self.arch})"
+                ),
+            },
+        }
+        # Encoding this as JSON is good enough, and saves pulling in a YAML
+        # library dependency.
+        metadata_yaml = (
+            json.dumps(
+                metadata,
+                sort_keys=True,
+                indent=4,
+                separators=(",", ": "),
+                ensure_ascii=False,
+            ).encode("UTF-8")
+            + b"\n"
+        )
+        metadata_file = tarfile.TarInfo(name="metadata.yaml")
+        metadata_file.size = len(metadata_yaml)
+        target_tarball.addfile(metadata_file, io.BytesIO(metadata_yaml))
+
+        # Mangle the chroot tarball into the form needed by LXD: when using
+        # the combined metadata/rootfs form, the rootfs must be under
+        # rootfs/ rather than under chroot-autobuild/.
+        for entry in source_tarball:
+            fileptr = None
+            try:
+                orig_name = entry.name.split("chroot-autobuild", 1)[-1]
+                entry.name = "rootfs" + orig_name
+
+                if entry.isfile():
+                    try:
+                        # entry.name was renamed above, but the lookup still
+                        # works because the tarball's member index holds this
+                        # same TarInfo object (now bearing the new name).
+                        fileptr = source_tarball.extractfile(entry.name)
+                    except KeyError:
+                        pass
+                elif entry.islnk():
+                    # Update hardlinks to point to the right target
+                    entry.linkname = (
+                        "rootfs"
+                        + entry.linkname.split("chroot-autobuild", 1)[-1]
+                    )
+
+                target_tarball.addfile(entry, fileobj=fileptr)
+            finally:
+                if fileptr is not None:
+                    fileptr.close()
+
+    def _init(self):
+        """Configure LXD if necessary."""
+        # "lxd init" creates a key pair (see
+        # https://linuxcontainers.org/lxd/docs/master/authentication/), so
+        # check for that to see whether LXD has already been initialized.
+        if not os.path.exists("/var/snap/lxd/common/lxd/server.key"):
+            subprocess.check_call(["sudo", "lxd", "init", "--auto"])
+            # Generate a LXD client certificate for the buildd user.
+            with open("/dev/null", "w") as devnull:
+                subprocess.call(["lxc", "list"], stdout=devnull)
+
+    def create(self, image_path, image_type):
+        """See `Backend`."""
+        self._init()
+        # Drop any previously-imported image with our alias first.
+        self.remove_image()
+
+        # This is a lot of data to shuffle around in Python, but there
+        # doesn't currently seem to be any way to ask pylxd to ask lxd to
+        # import an image from a file on disk.
+        if image_type == "chroot":
+            # Convert the chroot tarball in memory into LXD's combined
+            # metadata/rootfs form, then upload it.
+            with io.BytesIO() as target_file:
+                with tarfile.open(name=image_path, mode="r") as source_tarball:
+                    with tarfile.open(
+                        fileobj=target_file, mode="w"
+                    ) as target_tarball:
+                        self._convert(source_tarball, target_tarball)
+
+                image = self.client.images.create(
+                    target_file.getvalue(), wait=True
+                )
+        elif image_type == "lxd":
+            # Already in LXD image format; upload as-is.
+            with open(image_path, "rb") as image_file:
+                image = self.client.images.create(image_file.read(), wait=True)
+        else:
+            raise ValueError("Unhandled image type: %s" % image_type)
+
+        # Tag the image so start() can find it by alias later.
+        image.add_alias(self.alias, self.alias)
+
+    @property
+    def sys_dir(self):
+        """sysfs directory for our bridge; exists iff the bridge does."""
+        return os.path.join("/sys/class/net", self.bridge_name)
+
+    @property
+    def dnsmasq_pid_file(self):
+        """Path of the pid file written by our dnsmasq instance."""
+        return os.path.join(self.run_dir, "dnsmasq.pid")
+
+    def iptables(self, args, check=True):
+        """Run an iptables command, tagging the rule as ours via a comment.
+
+        :param args: iptables arguments (after "iptables -w").
+        :param check: if True, raise on failure; otherwise ignore it.
+        """
+        call = subprocess.check_call if check else subprocess.call
+        call(
+            ["sudo", "iptables", "-w"]
+            + args
+            + ["-m", "comment", "--comment", "managed by launchpad-buildd"]
+        )
+
+    def start_bridge(self):
+        """Create and configure the NATed bridge used by build containers.
+
+        Sets up the bridge device with our gateway address, enables IPv4
+        forwarding, adds MSS-clamping and masquerade rules, and starts a
+        dnsmasq instance to serve DNS to the container.
+        """
+        if not os.path.isdir(self.run_dir):
+            os.makedirs(self.run_dir)
+        subprocess.check_call(
+            [
+                "sudo",
+                "ip",
+                "link",
+                "add",
+                "dev",
+                self.bridge_name,
+                "type",
+                "bridge",
+            ]
+        )
+        subprocess.check_call(
+            [
+                "sudo",
+                "ip",
+                "addr",
+                "add",
+                str(self.ipv4_network),
+                "dev",
+                self.bridge_name,
+            ]
+        )
+        subprocess.check_call(
+            ["sudo", "ip", "link", "set", "dev", self.bridge_name, "up"]
+        )
+        subprocess.check_call(
+            ["sudo", "sysctl", "-q", "-w", "net.ipv4.ip_forward=1"]
+        )
+        # Clamp TCP MSS to path MTU for traffic from the container, to
+        # avoid blackholing when an intermediate link has a smaller MTU.
+        self.iptables(
+            [
+                "-t",
+                "mangle",
+                "-A",
+                "FORWARD",
+                "-i",
+                self.bridge_name,
+                "-p",
+                "tcp",
+                "--tcp-flags",
+                "SYN,RST",
+                "SYN",
+                "-j",
+                "TCPMSS",
+                "--clamp-mss-to-pmtu",
+            ]
+        )
+        # Masquerade traffic leaving the container network.
+        self.iptables(
+            [
+                "-t",
+                "nat",
+                "-A",
+                "POSTROUTING",
+                "-s",
+                str(self.ipv4_network),
+                "!",
+                "-d",
+                str(self.ipv4_network),
+                "-j",
+                "MASQUERADE",
+            ]
+        )
+        subprocess.check_call(
+            [
+                "sudo",
+                "/usr/sbin/dnsmasq",
+                "-s",
+                "lpbuildd",
+                "-S",
+                "/lpbuildd/",
+                "-u",
+                "buildd",
+                "--strict-order",
+                "--bind-interfaces",
+                "--pid-file=%s" % self.dnsmasq_pid_file,
+                "--except-interface=lo",
+                "--interface=%s" % self.bridge_name,
+                "--listen-address=%s" % str(self.ipv4_network.ip),
+            ]
+        )
+
+    def stop_bridge(self):
+        """Tear down the bridge set up by start_bridge.
+
+        Best-effort: most steps use subprocess.call / check=False so that a
+        partially-configured bridge can still be cleaned up.
+        """
+        if not os.path.isdir(self.sys_dir):
+            # Bridge doesn't exist; nothing to do.
+            return
+        subprocess.call(
+            ["sudo", "ip", "addr", "flush", "dev", self.bridge_name]
+        )
+        subprocess.call(
+            ["sudo", "ip", "link", "set", "dev", self.bridge_name, "down"]
+        )
+        self.iptables(
+            [
+                "-t",
+                "mangle",
+                "-D",
+                "FORWARD",
+                "-i",
+                self.bridge_name,
+                "-p",
+                "tcp",
+                "--tcp-flags",
+                "SYN,RST",
+                "SYN",
+                "-j",
+                "TCPMSS",
+                "--clamp-mss-to-pmtu",
+            ]
+        )
+        self.iptables(
+            [
+                "-t",
+                "nat",
+                "-D",
+                "POSTROUTING",
+                "-s",
+                str(self.ipv4_network),
+                "!",
+                "-d",
+                str(self.ipv4_network),
+                "-j",
+                "MASQUERADE",
+            ],
+            check=False,
+        )
+        if os.path.exists(self.dnsmasq_pid_file):
+            with open(self.dnsmasq_pid_file) as f:
+                try:
+                    dnsmasq_pid = int(f.read())
+                except Exception:
+                    # Corrupt or empty pid file; skip the kill.
+                    pass
+                else:
+                    # dnsmasq is supposed to drop privileges, but kill it as
+                    # root just in case it fails to do so for some reason.
+                    subprocess.call(["sudo", "kill", "-9", str(dnsmasq_pid)])
+            os.unlink(self.dnsmasq_pid_file)
+        subprocess.call(["sudo", "ip", "link", "delete", self.bridge_name])
+
+    @cached_property
+    def _nvidia_container_paths(self):
+        """The paths that need to be bind-mounted for NVIDIA CUDA support.
+
+        LXD's security.privileged=true and nvidia.runtime=true options are
+        unfortunately incompatible, but we can emulate the important bits of
+        the latter with some tactical bind-mounts.  There is no very good
+        way to do this; this seems like the least unpleasant approach.
+        """
+        env = dict(os.environ)
+        # Make sure nvidia-container-cli can find the libraries shipped in
+        # the LXD snap.
+        env["LD_LIBRARY_PATH"] = "/snap/lxd/current/lib"
+        return subprocess.check_output(
+            ["/snap/lxd/current/bin/nvidia-container-cli.real", "list"],
+            env=env,
+            universal_newlines=True,
+        ).splitlines()
+
+    def create_profile(self):
+        for addr in self.ipv4_network:
+            if addr not in (
+                self.ipv4_network.network,
+                self.ipv4_network.ip,
+                self.ipv4_network.broadcast,
+            ):
+                ipv4_address = netaddr.IPNetwork(
+                    (int(addr), self.ipv4_network.prefixlen)
+                )
+                break
+        else:
+            raise BackendException(
+                "%s has no usable IP addresses" % self.ipv4_network
+            )
+
+        try:
+            old_profile = self.client.profiles.get(self.profile_name)
+        except LXDAPIException:
+            pass
+        else:
+            old_profile.delete()
+
+        raw_lxc_config = [
+            ("lxc.cap.drop", ""),
+            ("lxc.cap.drop", "sys_time sys_module"),
+            ("lxc.cgroup.devices.deny", ""),
+            ("lxc.cgroup.devices.allow", ""),
+            ("lxc.cgroup2.devices.deny", ""),
+            ("lxc.cgroup2.devices.allow", ""),
+            ("lxc.mount.auto", ""),
+            ("lxc.mount.auto", "proc:rw sys:rw"),
+            (
+                "lxc.mount.entry",
+                "udev /dev devtmpfs rw,nosuid,relatime,mode=755,inode64",
+            ),
+            ("lxc.autodev", "0"),
+        ]
+
+        lxc_version = self._client.host_info["environment"]["driver_version"]
+        major, minor = (int(v) for v in lxc_version.split(".")[0:2])
+
+        if major >= 3:
+            raw_lxc_config.extend(
+                [
+                    ("lxc.apparmor.profile", "unconfined"),
+                    ("lxc.net.0.ipv4.address", ipv4_address),
+                    ("lxc.net.0.ipv4.gateway", self.ipv4_network.ip),
+                ]
+            )
+        else:
+            raw_lxc_config.extend(
+                [
+                    ("lxc.aa_profile", "unconfined"),
+                    ("lxc.network.0.ipv4", ipv4_address),
+                    ("lxc.network.0.ipv4.gateway", self.ipv4_network.ip),
+                ]
+            )
+
+        # Linux 4.4 on powerpc doesn't support all the seccomp bits that LXD
+        # needs.
+        if self.arch == "powerpc":
+            raw_lxc_config.append(("lxc.seccomp", ""))
+        config = {
+            "security.privileged": "true",
+            "security.nesting": "true",
+            "raw.lxc": "".join(
+                f"{key}={value}\n" for key, value in sorted(raw_lxc_config)
+            ),
+        }
+        devices = {
+            "eth0": {
+                "name": "eth0",
+                "nictype": "bridged",
+                "parent": self.bridge_name,
+                "type": "nic",
+            },
+        }
+        if major >= 3:
+            devices["root"] = {
+                "path": "/",
+                "pool": "default",
+                "type": "disk",
+            }
+        if "gpu-nvidia" in self.constraints:
+            for i, path in enumerate(self._nvidia_container_paths):
+                # Skip devices here, because bind-mounted devices aren't
+                # propagated into snaps (such as lxd) installed inside the
+                # container, which causes LXC's nvidia hook to fail.  We'll
+                # create the relevant device nodes after the container has
+                # started.
+                if not path.startswith("/dev/"):
+                    devices[f"nvidia-{i}"] = {
+                        "path": path,
+                        "source": path,
+                        "type": "disk",
+                    }
+        self.client.profiles.create(self.profile_name, config, devices)
+
+    def start(self):
+        """See `Backend`.
+
+        Creates the container from our image, seeds its network and policy
+        configuration, starts it, and performs post-start device and snapd
+        setup.  The statement order here matters: files are written before
+        the container starts, device nodes are created after.
+        """
+        self.stop()
+
+        self.create_profile()
+        self.start_bridge()
+
+        container = self.client.containers.create(
+            {
+                "name": self.name,
+                "profiles": [self.profile_name],
+                "source": {"type": "image", "alias": self.alias},
+            },
+            wait=True,
+        )
+
+        # Mirror the host's hostname/FQDN into the container so build
+        # tools that resolve the local name behave sensibly.
+        hostname = subprocess.check_output(
+            ["hostname"], universal_newlines=True
+        ).rstrip("\n")
+        fqdn = subprocess.check_output(
+            ["hostname", "--fqdn"], universal_newlines=True
+        ).rstrip("\n")
+        with self.open("/etc/hosts", mode="a") as hosts_file:
+            hosts_file.seek(0, os.SEEK_END)
+            if not hosts_file.tell():
+                # /etc/hosts is missing or empty
+                hosts_file.write(fallback_hosts)
+            print(f"\n127.0.1.1\t{fqdn} {hostname}", file=hosts_file)
+            os.fchmod(hosts_file.fileno(), 0o644)
+        with self.open("/etc/hostname", mode="w+") as hostname_file:
+            print(hostname, file=hostname_file)
+            os.fchmod(hostname_file.fileno(), 0o644)
+
+        resolv_conf = "/etc/resolv.conf"
+
+        # If the host uses systemd-resolved's stub resolver, copy the real
+        # upstream resolver configuration instead of the 127.0.0.53 stub,
+        # which is unreachable from inside the container.
+        if os.path.islink(resolv_conf):
+            resolv_conf = os.path.realpath(resolv_conf)
+            if (
+                resolv_conf == "/run/systemd/resolve/stub-resolv.conf"
+                and os.path.isfile("/run/systemd/resolve/resolv.conf")
+            ):
+                resolv_conf = "/run/systemd/resolve/resolv.conf"
+
+        self.copy_in(resolv_conf, "/etc/resolv.conf")
+
+        # Install the policy-rc.d that blocks service startup during builds.
+        with self.open(
+            "/usr/local/sbin/policy-rc.d", mode="w+"
+        ) as policy_rc_d_file:
+            policy_rc_d_file.write(policy_rc_d)
+            os.fchmod(policy_rc_d_file.fileno(), 0o755)
+        # For targets that use Upstart, prevent the mounted-dev job from
+        # creating devices.  Most of the devices it creates are unnecessary
+        # in a container, and creating loop devices will race with our own
+        # code to do so.
+        if self.path_exists("/etc/init/mounted-dev.conf"):
+            with self.open("/etc/init/mounted-dev.conf") as mounted_dev_file:
+                script = ""
+                in_script = False
+                for line in mounted_dev_file:
+                    if in_script:
+                        # Comment out any MAKEDEV invocation, preserving
+                        # indentation, via a no-op ":" command.
+                        script += re.sub(
+                            r"^(\s*)(.*MAKEDEV)", r"\1: # \2", line
+                        )
+                        if line.strip() == "end script":
+                            in_script = False
+                    elif line.strip() == "script":
+                        script += line
+                        in_script = True
+
+            if script:
+                with self.open(
+                    "/etc/init/mounted-dev.override", mode="w"
+                ) as mounted_dev_override_file:
+                    mounted_dev_override_file.write(script)
+                    os.fchmod(mounted_dev_override_file.fileno(), 0o644)
+
+        # Start the container and wait for it to start.
+        container.start(wait=True)
+        timeout = 60
+        now = time.time()
+        while time.time() < now + timeout:
+            try:
+                container = self.client.containers.get(self.name)
+            except LXDAPIException:
+                container = None
+                break
+            if container.status_code == LXD_RUNNING:
+                break
+            time.sleep(1)
+        if container is None or container.status_code != LXD_RUNNING:
+            raise BackendException(
+                "Container failed to start within %d seconds" % timeout
+            )
+
+        # Create dm-# devices.  On focal kpartx looks for dm devices and hangs
+        # in their absence.
+        major = get_device_mapper_major()
+        for minor in range(8):
+            if not self.path_exists(f"/dev/dm-{minor}"):
+                self.run(
+                    [
+                        "mknod",
+                        "-m",
+                        "0660",
+                        f"/dev/dm-{minor}",
+                        "b",
+                        str(major),
+                        str(minor),
+                    ]
+                )
+
+        if "gpu-nvidia" in self.constraints:
+            # Create nvidia* devices.  We have to do this here rather than
+            # bind-mounting them into the container, because bind-mounts
+            # aren't propagated into snaps (such as lxd) installed inside
+            # the container.
+            for path in self._nvidia_container_paths:
+                if path.startswith("/dev/"):
+                    st = os.stat(path)
+                    if stat.S_ISCHR(st.st_mode) and not self.path_exists(path):
+                        # Replicate the host device node (mode/major/minor).
+                        self.run(
+                            [
+                                "mknod",
+                                "-m",
+                                "0%o" % stat.S_IMODE(st.st_mode),
+                                path,
+                                "c",
+                                str(os.major(st.st_rdev)),
+                                str(os.minor(st.st_rdev)),
+                            ]
+                        )
+
+            # We bind-mounted several libraries into the container, so run
+            # ldconfig to update the dynamic linker's cache.
+            self.run(["/sbin/ldconfig"])
+
+        # XXX cjwatson 2017-09-07: With LXD < 2.2 we can't create the
+        # directory until the container has started.  We can get away with
+        # this for the time being because snapd isn't in the buildd chroots.
+        self.run(["mkdir", "-p", "/etc/systemd/system/snapd.service.d"])
+        with self.open(
+            "/etc/systemd/system/snapd.service.d/no-cdn.conf", mode="w+"
+        ) as no_cdn_file:
+            print(
+                dedent(
+                    """\
+                [Service]
+                Environment=SNAPPY_STORE_NO_CDN=1
+                """
+                ),
+                file=no_cdn_file,
+                end="",
+            )
+            os.fchmod(no_cdn_file.fileno(), 0o644)
+
+        # Refreshing snaps from a timer unit during a build isn't
+        # appropriate.  Mask this, but manually so that we don't depend on
+        # systemctl existing.  This relies on /etc/systemd/system/ having
+        # been created above.
+        self.run(
+            [
+                "ln",
+                "-s",
+                "/dev/null",
+                "/etc/systemd/system/snapd.refresh.timer",
+            ]
+        )
+
+        if self.arch == "armhf":
+            # Work around https://github.com/lxc/lxcfs/issues/553.  In
+            # principle that could result in over-reporting the number of
+            # available CPU cores, but that isn't a concern in
+            # launchpad-buildd.
+            try:
+                self.run(["umount", "/proc/cpuinfo"])
+            except subprocess.CalledProcessError:
+                pass
+
+    def run(
+        self,
+        args,
+        cwd=None,
+        env=None,
+        input_text=None,
+        get_output=False,
+        echo=False,
+        return_process=False,
+        **kwargs,
+    ):
+        """See `Backend`.
+
+        Runs a command inside the container via "lxc exec".
+
+        :param args: command and arguments to run.
+        :param cwd: working directory inside the container, if any.
+        :param env: extra environment variables to set.
+        :param input_text: text to feed to the process's stdin.
+        :param get_output: if True, capture and return stdout.
+        :param echo: if True, print the command (and output, if captured).
+        :param return_process: if True, return the Popen object instead of
+            waiting for completion.
+        """
+        env_params = []
+        if env:
+            for key, value in env.items():
+                env_params.extend(["--env", f"{key}={value}"])
+        if self.arch is not None:
+            args = set_personality(args, self.arch, series=self.series)
+        if cwd is not None:
+            # This requires either a helper program in the chroot or
+            # unpleasant quoting.  For now we go for the unpleasant quoting,
+            # though once we have coreutils >= 8.28 everywhere we'll be able
+            # to use "env --chdir".
+            escaped_args = " ".join(shell_escape(arg) for arg in args)
+            args = [
+                "/bin/sh",
+                "-c",
+                f"cd {shell_escape(cwd)} && {escaped_args}",
+            ]
+        if echo:
+            print(
+                "Running in container: %s"
+                % " ".join(shell_escape(arg) for arg in args)
+            )
+        # pylxd's Container.execute doesn't support sending stdin, and it's
+        # tedious to implement ourselves.
+        cmd = ["lxc", "exec", self.name] + env_params + ["--"] + args
+        if input_text is None and not get_output:
+            subprocess.check_call(cmd, **kwargs)
+        else:
+            if get_output:
+                kwargs["stdout"] = subprocess.PIPE
+            proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, **kwargs)
+            if return_process:
+                # Caller takes over the process (and must communicate/wait).
+                return proc
+            output, _ = proc.communicate(input_text)
+            if proc.returncode:
+                raise subprocess.CalledProcessError(proc.returncode, cmd)
+            if get_output:
+                if echo:
+                    print("Output:")
+                    output_text = output
+                    if isinstance(output_text, bytes):
+                        output_text = output_text.decode("UTF-8", "replace")
+                    print(output_text)
+                return output
+
+    def copy_in(self, source_path, target_path):
+        """See `Backend`.
+
+        Pushes a host file into the container, preserving its mode and
+        setting owner/group to root.
+        """
+        # pylxd's FilesManager doesn't support sending UID/GID/mode.
+        container = self.client.containers.get(self.name)
+        with open(source_path, "rb") as source_file:
+            params = {"path": target_path}
+            data = source_file.read()
+            mode = stat.S_IMODE(os.fstat(source_file.fileno()).st_mode)
+            headers = {
+                "X-LXD-uid": "0",
+                "X-LXD-gid": "0",
+                # Go (and hence LXD) only supports 0o prefixes for octal
+                # numbers as of Go 1.13, and it's not clear that we can
+                # assume this.  Use plain 0 prefixes instead.
+                "X-LXD-mode": "0%o" % mode if mode else "0",
+            }
+            try:
+                container.api.files.post(
+                    params=params, data=data, headers=headers
+                )
+            except LXDAPIException as e:
+                raise LXDException(
+                    f"Failed to push {self.name}:{target_path}", e
+                )
+
+    def _get_file(self, container, *args, **kwargs):
+        # pylxd < 2.1.1 tries to validate the response as JSON in streaming
+        # mode and ends up running out of memory on large files.  Work
+        # around this.
+        response = container.api.files.session.get(
+            container.api.files._api_endpoint, *args, **kwargs
+        )
+        if response.status_code != 200:
+            raise LXDAPIException(response)
+        return response
+
+    def copy_out(self, source_path, target_path):
+        """See `Backend`.
+
+        Streams a container file out to the host in 64 KiB chunks.
+        """
+        # pylxd's FilesManager doesn't support streaming, which is important
+        # since copied-out files may be large.
+        # This ignores UID/GID/mode, but then so does "lxc file pull".
+        container = self.client.containers.get(self.name)
+        with open(target_path, "wb") as target_file:
+            params = {"path": source_path}
+            try:
+                with closing(
+                    self._get_file(container, params=params, stream=True)
+                ) as response:
+                    for chunk in response.iter_content(chunk_size=65536):
+                        target_file.write(chunk)
+            except LXDAPIException as e:
+                raise LXDException(
+                    f"Failed to pull {self.name}:{source_path}", e
+                )
+
+    def stop(self):
+        """See `Backend`.
+
+        Stops and deletes our container if it exists, then tears down the
+        bridge in any case.
+        """
+        try:
+            container = self.client.containers.get(self.name)
+        except LXDAPIException:
+            # No such container; still clean up the bridge below.
+            pass
+        else:
+            if container.status_code == LXD_RUNNING:
+                container.stop(wait=True)
+            container.delete(wait=True)
+        self.stop_bridge()
+
+    def remove_image(self):
+        """Delete the image carrying our alias, if one exists."""
+        for image in self.client.images.all():
+            if any(alias["name"] == self.alias for alias in image.aliases):
+                image.delete(wait=True)
+                return
+
+    def remove(self):
+        """See `Backend`."""
+        self.remove_image()
+        super().remove()
diff --git a/lpbuildd/target/operation.py b/lpbuildd/target/operation.py
new file mode 100644
index 0000000..1590271
--- /dev/null
+++ b/lpbuildd/target/operation.py
@@ -0,0 +1,65 @@
+# Copyright 2017-2021 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+from collections import OrderedDict
+
+from lpbuildd.target.backend import make_backend
+
+
+class Operation:
+    """An operation to perform on the target environment."""
+
+    description = "An unidentified operation."
+    # Default working directory for build commands inside the target.
+    buildd_path = "/build"
+
+    @classmethod
+    def add_arguments(cls, parser):
+        """Add this operation's command-line arguments to `parser`."""
+        parser.add_argument(
+            "--backend",
+            choices=["chroot", "lxd", "fake", "uncontained"],
+            help="use this type of backend",
+        )
+        parser.add_argument(
+            "--series", metavar="SERIES", help="operate on series SERIES"
+        )
+        parser.add_argument(
+            "--arch", metavar="ARCH", help="operate on architecture ARCH"
+        )
+        parser.add_argument(
+            "--constraint",
+            metavar="CONSTRAINT",
+            action="append",
+            dest="constraints",
+            help="add builder resource tag for this build",
+        )
+        parser.add_argument(
+            "build_id", metavar="ID", help="operate on build ID"
+        )
+
+    def __init__(self, args, parser):
+        # Parsed argparse namespace; kept for subclasses.
+        self.args = args
+        # Construct the backend named by --backend for this build.
+        self.backend = make_backend(
+            self.args.backend,
+            self.args.build_id,
+            series=self.args.series,
+            arch=self.args.arch,
+            constraints=self.args.constraints,
+        )
+
+    def run_build_command(self, args, env=None, **kwargs):
+        """Run a build command in the target.
+
+        :param args: the command and arguments to run.
+        :param env: dictionary of additional environment variables to set.
+        :param kwargs: any other keyword arguments to pass to Backend.run.
+        """
+        full_env = OrderedDict()
+        full_env["LANG"] = "C.UTF-8"
+        full_env["SHELL"] = "/bin/sh"
+        if env:
+            full_env.update(env)
+        cwd = kwargs.pop("cwd", self.buildd_path)
+        return self.backend.run(args, cwd=cwd, env=full_env, **kwargs)
+
+    def run(self):
+        """Perform the operation; subclasses must override."""
+        raise NotImplementedError
diff --git a/lpbuildd/target/proxy.py b/lpbuildd/target/proxy.py
new file mode 100644
index 0000000..2f264c1
--- /dev/null
+++ b/lpbuildd/target/proxy.py
@@ -0,0 +1,47 @@
+# Copyright 2019-2020 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os
+import sys
+from collections import OrderedDict
+
+
+class BuilderProxyOperationMixin:
+    """Methods supporting the build time HTTP proxy for certain build types."""
+
+    def __init__(self, args, parser):
+        super().__init__(args, parser)
+        self.bin = os.path.dirname(sys.argv[0])
+
+    @classmethod
+    def add_arguments(cls, parser):
+        super().add_arguments(parser)
+        parser.add_argument("--proxy-url", help="builder proxy url")
+        parser.add_argument(
+            "--revocation-endpoint",
+            help="builder proxy token revocation endpoint",
+        )
+
+    @property
+    def proxy_deps(self):
+        return ["python3", "socat"]
+
+    def install_git_proxy(self):
+        self.backend.copy_in(
+            os.path.join(self.bin, "lpbuildd-git-proxy"),
+            "/usr/local/bin/lpbuildd-git-proxy",
+        )
+
+    def build_proxy_environment(self, proxy_url=None, env=None):
+        """Extend a command environment to include http proxy variables."""
+        full_env = OrderedDict()
+        if env:
+            full_env.update(env)
+        if proxy_url:
+            full_env["http_proxy"] = self.args.proxy_url
+            full_env["https_proxy"] = self.args.proxy_url
+            full_env["GIT_PROXY_COMMAND"] = "/usr/local/bin/lpbuildd-git-proxy"
+            # Avoid needing to keep track of snap store CDNs in proxy
+            # configuration.
+            full_env["SNAPPY_STORE_NO_CDN"] = "1"
+        return full_env
diff --git a/lpbuildd/target/run_ci.py b/lpbuildd/target/run_ci.py
new file mode 100644
index 0000000..54aca19
--- /dev/null
+++ b/lpbuildd/target/run_ci.py
@@ -0,0 +1,244 @@
+# Copyright 2022 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import logging
+import os
+
+from lpbuildd.target.build_snap import SnapChannelsAction
+from lpbuildd.target.operation import Operation
+from lpbuildd.target.proxy import BuilderProxyOperationMixin
+from lpbuildd.target.snapstore import SnapStoreOperationMixin
+from lpbuildd.target.vcs import VCSOperationMixin
+from lpbuildd.util import shell_escape
+
+RETCODE_FAILURE_INSTALL = 200
+RETCODE_FAILURE_BUILD = 201
+
+
+logger = logging.getLogger(__name__)
+
+
+class RunCIPrepare(
+    BuilderProxyOperationMixin,
+    VCSOperationMixin,
+    SnapStoreOperationMixin,
+    Operation,
+):
+    description = "Prepare for running CI jobs."
+    buildd_path = "/build/tree"
+
+    @classmethod
+    def add_arguments(cls, parser):
+        super().add_arguments(parser)
+        parser.add_argument(
+            "--channel",
+            action=SnapChannelsAction,
+            metavar="SNAP=CHANNEL",
+            dest="channels",
+            default={},
+            help="install SNAP from CHANNEL",
+        )
+        parser.add_argument(
+            "--scan-malware",
+            action="store_true",
+            default=False,
+            help="perform malware scans on output files",
+        )
+        parser.add_argument(
+            "--clamav-database-url",
+            help="override default ClamAV database URL",
+        )
+
+    def install(self):
+        logger.info("Running install phase...")
+        deps = []
+        if self.args.proxy_url:
+            deps.extend(self.proxy_deps)
+            self.install_git_proxy()
+        if self.backend.supports_snapd:
+            for dep in "snapd", "fuse", "squashfuse":
+                if self.backend.is_package_available(dep):
+                    deps.append(dep)
+        deps.extend(self.vcs_deps)
+        if self.args.scan_malware:
+            deps.append("clamav")
+        self.backend.run(["apt-get", "-y", "install"] + deps)
+        if self.backend.supports_snapd:
+            self.snap_store_set_proxy()
+        for snap_name, channel in sorted(self.args.channels.items()):
+            if snap_name not in ("lxd", "lpci"):
+                self.backend.run(
+                    ["snap", "install", "--channel=%s" % channel, snap_name]
+                )
+        for snap_name, classic in (("lxd", False), ("lpci", True)):
+            cmd = ["snap", "install"]
+            if classic:
+                cmd.append("--classic")
+            if snap_name in self.args.channels:
+                cmd.append("--channel=%s" % self.args.channels[snap_name])
+            cmd.append(snap_name)
+            self.backend.run(cmd)
+        self.backend.run(["lxd", "init", "--auto"])
+        if self.args.scan_malware:
+            # lpbuildd.target.lxd configures the container not to run most
+            # services, which is convenient since it allows us to ensure
+            # that ClamAV's database is up to date before proceeding.
+            if self.args.clamav_database_url:
+                with self.backend.open(
+                    "/etc/clamav/freshclam.conf", mode="a"
+                ) as freshclam_file:
+                    freshclam_file.write(
+                        f"PrivateMirror {self.args.clamav_database_url}\n"
+                    )
+            kwargs = {}
+            env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
+            if env:
+                kwargs["env"] = env
+            logger.info("Downloading malware definitions...")
+            self.backend.run(["freshclam", "--quiet"], **kwargs)
+
+    def repo(self):
+        """Collect VCS branch."""
+        logger.info("Running repo phase...")
+        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
+        self.vcs_fetch("tree", cwd="/build", env=env)
+        self.vcs_update_status(self.buildd_path)
+        self.backend.run(["chown", "-R", "buildd:buildd", "/build/tree"])
+
+    def run(self):
+        try:
+            self.install()
+        except Exception:
+            logger.exception("Install failed")
+            return RETCODE_FAILURE_INSTALL
+        try:
+            self.repo()
+        except Exception:
+            logger.exception("VCS setup failed")
+            return RETCODE_FAILURE_BUILD
+        return 0
+
+
+class RunCI(BuilderProxyOperationMixin, Operation):
+    description = "Run a CI job."
+    buildd_path = "/build/tree"
+
+    @classmethod
+    def add_arguments(cls, parser):
+        super().add_arguments(parser)
+        parser.add_argument("job_name", help="job name to run")
+        parser.add_argument(
+            "job_index", type=int, help="index within job name to run"
+        )
+        parser.add_argument(
+            "--environment-variable",
+            dest="environment_variables",
+            type=str,
+            action="append",
+            default=[],
+            help="environment variable where key and value are separated by =",
+        )
+        parser.add_argument(
+            "--package-repository",
+            dest="package_repositories",
+            type=str,
+            action="append",
+            default=[],
+            help="single apt repository line",
+        )
+        parser.add_argument(
+            "--plugin-setting",
+            dest="plugin_settings",
+            type=str,
+            action="append",
+            default=[],
+            help="plugin setting where the key and value are separated by =",
+        )
+        parser.add_argument(
+            "--secrets",
+            type=str,
+            help="secrets where the key and the value are separated by =",
+        )
+        parser.add_argument(
+            "--scan-malware",
+            action="store_true",
+            default=False,
+            help="perform malware scans on output files",
+        )
+
+    def run_build_command(self, args, **kwargs):
+        # Run build commands as the `buildd` user, since `lpci` can only
+        # start containers with `nvidia.runtime=true` if it's run as a
+        # non-root user.
+        super().run_build_command(
+            ["runuser", "-u", "buildd", "-g", "buildd", "-G", "lxd", "--"]
+            + args,
+            **kwargs,
+        )
+
+    def run_job(self):
+        logger.info("Running job phase...")
+        env = self.build_proxy_environment(proxy_url=self.args.proxy_url)
+        job_id = f"{self.args.job_name}:{self.args.job_index}"
+        logger.info("Running %s" % job_id)
+        output_path = os.path.join("/build", "output")
+        # This matches the per-job output path used by lpci.
+        job_output_path = os.path.join(
+            output_path, self.args.job_name, str(self.args.job_index)
+        )
+        self.backend.run(["mkdir", "-p", job_output_path])
+        self.backend.run(["chown", "-R", "buildd:buildd", output_path])
+        lpci_args = [
+            "lpci",
+            "-v",
+            "run-one",
+            "--output-directory",
+            output_path,
+            self.args.job_name,
+            str(self.args.job_index),
+        ]
+        for repository in self.args.package_repositories:
+            lpci_args.extend(["--package-repository", repository])
+
+        environment_variables = dict(
+            pair.split("=", maxsplit=1)
+            for pair in self.args.environment_variables
+        )
+        for key, value in environment_variables.items():
+            lpci_args.extend(["--set-env", f"{key}={value}"])
+
+        plugin_settings = dict(
+            pair.split("=", maxsplit=1) for pair in self.args.plugin_settings
+        )
+        for key, value in plugin_settings.items():
+            lpci_args.extend(["--plugin-setting", f"{key}={value}"])
+
+        if self.args.secrets:
+            lpci_args.extend(["--secrets", self.args.secrets])
+
+        if "gpu-nvidia" in self.backend.constraints:
+            lpci_args.append("--gpu-nvidia")
+
+        escaped_lpci_args = " ".join(shell_escape(arg) for arg in lpci_args)
+        tee_args = ["tee", os.path.join(job_output_path, "log")]
+        escaped_tee_args = " ".join(shell_escape(arg) for arg in tee_args)
+        args = [
+            "/bin/bash",
+            "-o",
+            "pipefail",
+            "-c",
+            f"{escaped_lpci_args} 2>&1 | {escaped_tee_args}",
+        ]
+        self.run_build_command(args, env=env)
+
+        if self.args.scan_malware:
+            clamscan = ["clamscan", "--recursive", job_output_path]
+            self.run_build_command(clamscan, env=env)
+
+    def run(self):
+        try:
+            self.run_job()
+        except Exception:
+            logger.exception("Job failed")
+            return RETCODE_FAILURE_BUILD
+        return 0
diff --git a/lpbuildd/target/snapstore.py b/lpbuildd/target/snapstore.py
new file mode 100644
index 0000000..348e6d5
--- /dev/null
+++ b/lpbuildd/target/snapstore.py
@@ -0,0 +1,41 @@
+# Copyright 2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+from urllib.parse import urljoin, urlparse, urlunparse
+
+import requests
+
+
+class SnapStoreOperationMixin:
+    """Methods supporting operations that interact with the snap store."""
+
+    @classmethod
+    def add_arguments(cls, parser):
+        super().add_arguments(parser)
+        parser.add_argument(
+            "--snap-store-proxy-url",
+            metavar="URL",
+            help="snap store proxy URL",
+        )
+
+    def snap_store_set_proxy(self):
+        if self.args.snap_store_proxy_url is None:
+            return
+        # Canonicalise: proxy registration always sends only the scheme and
+        # domain.
+        parsed_url = urlparse(self.args.snap_store_proxy_url)
+        canonical_url = urlunparse(
+            [parsed_url.scheme, parsed_url.netloc, "", "", "", ""]
+        )
+        assertions_response = requests.get(
+            urljoin(canonical_url, "v2/auth/store/assertions")
+        )
+        assertions_response.raise_for_status()
+        self.backend.run(
+            ["snap", "ack", "/dev/stdin"], input_text=assertions_response.text
+        )
+        store_id = assertions_response.headers.get("X-Assertion-Store-Id")
+        if store_id is not None:
+            self.backend.run(
+                ["snap", "set", "core", f"proxy.store={store_id}"]
+            )
diff --git a/lpbuildd/target/status.py b/lpbuildd/target/status.py
new file mode 100644
index 0000000..f05100d
--- /dev/null
+++ b/lpbuildd/target/status.py
@@ -0,0 +1,33 @@
+# Copyright 2018-2021 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import json
+import os
+
+
+class StatusOperationMixin:
+    """Methods supporting operations that save extra status information.
+
+    Extra status information will be picked up by the build manager and
+    included in XML-RPC status responses.
+    """
+
+    @property
+    def _status_path(self):
+        return os.path.join(self.backend.build_path, "status")
+
+    def get_status(self):
+        """Return a copy of this operation's extra status."""
+        if os.path.exists(self._status_path):
+            with open(self._status_path) as status_file:
+                return json.load(status_file)
+        else:
+            return {}
+
+    def update_status(self, **status):
+        """Update this operation's status with key/value pairs."""
+        full_status = self.get_status()
+        full_status.update(status)
+        with open("%s.tmp" % self._status_path, "w") as status_file:
+            json.dump(full_status, status_file)
+        os.rename("%s.tmp" % self._status_path, self._status_path)
diff --git a/lpbuildd/target/tests/__init__.py b/lpbuildd/target/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lpbuildd/target/tests/__init__.py
diff --git a/lpbuildd/target/tests/dummy_templates.tar.gz b/lpbuildd/target/tests/dummy_templates.tar.gz
new file mode 100644
index 0000000..09be5ef
Binary files /dev/null and b/lpbuildd/target/tests/dummy_templates.tar.gz differ
diff --git a/lpbuildd/target/tests/matchers.py b/lpbuildd/target/tests/matchers.py
new file mode 100644
index 0000000..dfbc74e
--- /dev/null
+++ b/lpbuildd/target/tests/matchers.py
@@ -0,0 +1,57 @@
+# Copyright 2021 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+from testtools.matchers import Equals, Is, MatchesDict, MatchesListwise
+
+
+class RanCommand(MatchesListwise):
+    def __init__(
+        self,
+        args,
+        echo=None,
+        cwd=None,
+        input_text=None,
+        stdout=None,
+        stderr=None,
+        get_output=None,
+        universal_newlines=None,
+        **env,
+    ):
+        kwargs_matcher = {}
+        if echo is not None:
+            kwargs_matcher["echo"] = Is(echo)
+        if cwd:
+            kwargs_matcher["cwd"] = Equals(cwd)
+        if input_text:
+            kwargs_matcher["input_text"] = Equals(input_text)
+        if stdout is not None:
+            kwargs_matcher["stdout"] = Equals(stdout)
+        if stderr is not None:
+            kwargs_matcher["stderr"] = Equals(stderr)
+        if get_output is not None:
+            kwargs_matcher["get_output"] = Is(get_output)
+        if universal_newlines is not None:
+            kwargs_matcher["universal_newlines"] = Is(universal_newlines)
+        if env:
+            kwargs_matcher["env"] = MatchesDict(
+                {key: Equals(value) for key, value in env.items()}
+            )
+        super().__init__([Equals((args,)), MatchesDict(kwargs_matcher)])
+
+
+class RanAptGet(RanCommand):
+    def __init__(self, *args):
+        super().__init__(["apt-get", "-y"] + list(args))
+
+
+class RanSnap(RanCommand):
+    def __init__(self, *args, **kwargs):
+        super().__init__(["snap"] + list(args), **kwargs)
+
+
+class RanBuildCommand(RanCommand):
+    def __init__(self, args, **kwargs):
+        kwargs.setdefault("cwd", "/build")
+        kwargs.setdefault("LANG", "C.UTF-8")
+        kwargs.setdefault("SHELL", "/bin/sh")
+        super().__init__(args, **kwargs)
diff --git a/lpbuildd/target/tests/test_apt.py b/lpbuildd/target/tests/test_apt.py
new file mode 100644
index 0000000..26eac33
--- /dev/null
+++ b/lpbuildd/target/tests/test_apt.py
@@ -0,0 +1,324 @@
+# Copyright 2017-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import stat
+import subprocess
+import tempfile
+import time
+from textwrap import dedent
+
+from fixtures import FakeLogger
+from systemfixtures import FakeTime
+from testtools import TestCase
+from testtools.matchers import (
+    ContainsDict,
+    Equals,
+    MatchesDict,
+    MatchesListwise,
+)
+
+from lpbuildd.target.cli import parse_args
+from lpbuildd.tests.fakebuilder import FakeMethod
+
+
+class MockCopyIn(FakeMethod):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.source_bytes = None
+
+    def __call__(self, source_path, *args, **kwargs):
+        with open(source_path, "rb") as source:
+            self.source_bytes = source.read()
+        return super().__call__(source_path, *args, **kwargs)
+
+
+class TestOverrideSourcesList(TestCase):
+    def test_succeeds(self):
+        args = [
+            "override-sources-list",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "deb http://archive.ubuntu.com/ubuntu xenial main",
+            "deb http://ppa.launchpad.net/launchpad/ppa/ubuntu xenial main",
+        ]
+        override_sources_list = parse_args(args=args).operation
+        self.assertEqual(0, override_sources_list.run())
+        self.assertEqual(
+            (
+                dedent(
+                    """\
+                deb http://archive.ubuntu.com/ubuntu xenial main
+                deb http://ppa.launchpad.net/launchpad/ppa/ubuntu xenial main
+                """
+                ).encode("UTF-8"),
+                stat.S_IFREG | 0o644,
+            ),
+            override_sources_list.backend.backend_fs["/etc/apt/sources.list"],
+        )
+        self.assertEqual(
+            (b'Acquire::Retries "3";\n', stat.S_IFREG | 0o644),
+            override_sources_list.backend.backend_fs[
+                "/etc/apt/apt.conf.d/99retries"
+            ],
+        )
+        self.assertEqual(
+            (
+                b'APT::Get::Always-Include-Phased-Updates "true";\n',
+                stat.S_IFREG | 0o644,
+            ),
+            override_sources_list.backend.backend_fs[
+                "/etc/apt/apt.conf.d/99phasing"
+            ],
+        )
+        self.assertEqual(
+            (
+                b"Package: *\nPin: release a=*-proposed\nPin-Priority: 500\n",
+                stat.S_IFREG | 0o644,
+            ),
+            override_sources_list.backend.backend_fs[
+                "/etc/apt/preferences.d/proposed.pref"
+            ],
+        )
+        self.assertEqual(
+            (
+                b"Package: *\nPin: release a=*-backports\nPin-Priority: 500\n",
+                stat.S_IFREG | 0o644,
+            ),
+            override_sources_list.backend.backend_fs[
+                "/etc/apt/preferences.d/backports.pref"
+            ],
+        )
+
+    def test_apt_proxy(self):
+        args = [
+            "override-sources-list",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--apt-proxy-url",
+            "http://apt-proxy.example:3128/",
+            "deb http://archive.ubuntu.com/ubuntu xenial main",
+        ]
+        override_sources_list = parse_args(args=args).operation
+        self.assertEqual(0, override_sources_list.run())
+        self.assertEqual(
+            (
+                dedent(
+                    """\
+                deb http://archive.ubuntu.com/ubuntu xenial main
+                """
+                ).encode("UTF-8"),
+                stat.S_IFREG | 0o644,
+            ),
+            override_sources_list.backend.backend_fs["/etc/apt/sources.list"],
+        )
+        self.assertEqual(
+            (b'Acquire::Retries "3";\n', stat.S_IFREG | 0o644),
+            override_sources_list.backend.backend_fs[
+                "/etc/apt/apt.conf.d/99retries"
+            ],
+        )
+        self.assertEqual(
+            (
+                b'APT::Get::Always-Include-Phased-Updates "true";\n',
+                stat.S_IFREG | 0o644,
+            ),
+            override_sources_list.backend.backend_fs[
+                "/etc/apt/apt.conf.d/99phasing"
+            ],
+        )
+        self.assertEqual(
+            (
+                dedent(
+                    """\
+                Acquire::http::Proxy "http://apt-proxy.example:3128/";
+                """
+                ).encode("UTF-8"),
+                stat.S_IFREG | 0o644,
+            ),
+            override_sources_list.backend.backend_fs[
+                "/etc/apt/apt.conf.d/99proxy"
+            ],
+        )
+
+
+# Output of:
+#     gpg --no-default-keyring \
+#         --keyring /usr/share/keyrings/ubuntu-archive-keyring.gpg \
+#         --armor --export --export-options export-minimal,export-clean \
+#         F6ECB3762474EDA9D21B7022871920D1991BC93C
+# (For test purposes, the exact key ID isn't particularly important.  This
+# just needs to be some kind of valid GPG public key.)
+TEST_GPG_KEY = dedent(
+    """\
+    -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+    mQINBFufwdoBEADv/Gxytx/LcSXYuM0MwKojbBye81s0G1nEx+lz6VAUpIUZnbkq
+    dXBHC+dwrGS/CeeLuAjPRLU8AoxE/jjvZVp8xFGEWHYdklqXGZ/gJfP5d3fIUBtZ
+    HZEJl8B8m9pMHf/AQQdsC+YzizSG5t5Mhnotw044LXtdEEkx2t6Jz0OGrh+5Ioxq
+    X7pZiq6Cv19BohaUioKMdp7ES6RYfN7ol6HSLFlrMXtVfh/ijpN9j3ZhVGVeRC8k
+    KHQsJ5PkIbmvxBiUh7SJmfZUx0IQhNMaDHXfdZAGNtnhzzNReb1FqNLSVkrS/Pns
+    AQzMhG1BDm2VOSF64jebKXffFqM5LXRQTeqTLsjUbbrqR6s/GCO8UF7jfUj6I7ta
+    LygmsHO/JD4jpKRC0gbpUBfaiJyLvuepx3kWoqL3sN0LhlMI80+fA7GTvoOx4tpq
+    VlzlE6TajYu+jfW3QpOFS5ewEMdL26hzxsZg/geZvTbArcP+OsJKRmhv4kNo6Ayd
+    yHQ/3ZV/f3X9mT3/SPLbJaumkgp3Yzd6t5PeBu+ZQk/mN5WNNuaihNEV7llb1Zhv
+    Y0Fxu9BVd/BNl0rzuxp3rIinB2TX2SCg7wE5xXkwXuQ/2eTDE0v0HlGntkuZjGow
+    DZkxHZQSxZVOzdZCRVaX/WEFLpKa2AQpw5RJrQ4oZ/OfifXyJzP27o03wQARAQAB
+    tEJVYnVudHUgQXJjaGl2ZSBBdXRvbWF0aWMgU2lnbmluZyBLZXkgKDIwMTgpIDxm
+    dHBtYXN0ZXJAdWJ1bnR1LmNvbT6JAjgEEwEKACIFAlufwdoCGwMGCwkIBwMCBhUI
+    AgkKCwQWAgMBAh4BAheAAAoJEIcZINGZG8k8LHMQAKS2cnxz/5WaoCOWArf5g6UH
+    beOCgc5DBm0hCuFDZWWv427aGei3CPuLw0DGLCXZdyc5dqE8mvjMlOmmAKKlj1uG
+    g3TYCbQWjWPeMnBPZbkFgkZoXJ7/6CB7bWRht1sHzpt1LTZ+SYDwOwJ68QRp7DRa
+    Zl9Y6QiUbeuhq2DUcTofVbBxbhrckN4ZteLvm+/nG9m/ciopc66LwRdkxqfJ32Cy
+    q+1TS5VaIJDG7DWziG+Kbu6qCDM4QNlg3LH7p14CrRxAbc4lvohRgsV4eQqsIcdF
+    kuVY5HPPj2K8TqpY6STe8Gh0aprG1RV8ZKay3KSMpnyV1fAKn4fM9byiLzQAovC0
+    LZ9MMMsrAS/45AvC3IEKSShjLFn1X1dRCiO6/7jmZEoZtAp53hkf8SMBsi78hVNr
+    BumZwfIdBA1v22+LY4xQK8q4XCoRcA9G+pvzU9YVW7cRnDZZGl0uwOw7z9PkQBF5
+    KFKjWDz4fCk+K6+YtGpovGKekGBb8I7EA6UpvPgqA/QdI0t1IBP0N06RQcs1fUaA
+    QEtz6DGy5zkRhR4pGSZn+dFET7PdAjEK84y7BdY4t+U1jcSIvBj0F2B7LwRL7xGp
+    SpIKi/ekAXLs117bvFHaCvmUYN7JVp1GMmVFxhIdx6CFm3fxG8QjNb5tere/YqK+
+    uOgcXny1UlwtCUzlrSaP
+    =9AdM
+    -----END PGP PUBLIC KEY BLOCK-----
+    """
+)
+
+
+class TestAddTrustedKeys(TestCase):
+    def test_add_trusted_keys(self):
+        args = [
+            "add-trusted-keys",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+        ]
+        add_trusted_keys = parse_args(args=args).operation
+        with tempfile.NamedTemporaryFile(mode="wb+") as keys_file:
+            keys_file.write(TEST_GPG_KEY.encode())
+            keys_file.seek(0)
+            add_trusted_keys.input_file = keys_file
+            with tempfile.NamedTemporaryFile(mode="wb+") as show_keys_file:
+                add_trusted_keys.show_keys_file = show_keys_file
+                self.assertEqual(0, add_trusted_keys.run())
+                expected_dearmored_key = subprocess.run(
+                    [
+                        "gpg",
+                        "--ignore-time-conflict",
+                        "--no-options",
+                        "--no-keyring",
+                        "--dearmor",
+                    ],
+                    input=TEST_GPG_KEY.encode(),
+                    capture_output=True,
+                ).stdout
+                self.assertEqual(
+                    (expected_dearmored_key, stat.S_IFREG | 0o644),
+                    add_trusted_keys.backend.backend_fs[
+                        "/etc/apt/trusted.gpg.d/launchpad-buildd.gpg"
+                    ],
+                )
+                show_keys_file.seek(0)
+                self.assertIn(
+                    "Key fingerprint = F6EC B376 2474 EDA9 D21B  "
+                    "7022 8719 20D1 991B C93C",
+                    show_keys_file.read().decode(),
+                )
+
+
+class RanAptGet(MatchesListwise):
+    def __init__(self, args_list):
+        super().__init__(
+            [
+                MatchesListwise(
+                    [
+                        Equals((["/usr/bin/apt-get"] + args,)),
+                        ContainsDict(
+                            {
+                                "env": MatchesDict(
+                                    {
+                                        "LANG": Equals("C"),
+                                        "DEBIAN_FRONTEND": Equals(
+                                            "noninteractive"
+                                        ),
+                                        "TTY": Equals("unknown"),
+                                    }
+                                ),
+                            }
+                        ),
+                    ]
+                )
+                for args in args_list
+            ]
+        )
+
+
+class TestUpdate(TestCase):
+    def test_succeeds(self):
+        self.useFixture(FakeTime())
+        start_time = time.time()
+        args = [
+            "update-debian-chroot",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+        ]
+        update = parse_args(args=args).operation
+        self.assertEqual(0, update.run())
+
+        expected_args = [
+            ["-uy", "update"],
+            [
+                "-o",
+                "DPkg::Options::=--force-confold",
+                "-uy",
+                "--purge",
+                "dist-upgrade",
+            ],
+        ]
+        self.assertThat(update.backend.run.calls, RanAptGet(expected_args))
+        self.assertEqual(start_time, time.time())
+
+    def test_first_run_fails(self):
+        class FailFirstTime(FakeMethod):
+            def __call__(self, run_args, *args, **kwargs):
+                super().__call__(run_args, *args, **kwargs)
+                if len(self.calls) == 1:
+                    raise subprocess.CalledProcessError(1, run_args)
+
+        logger = self.useFixture(FakeLogger())
+        self.useFixture(FakeTime())
+        start_time = time.time()
+        args = [
+            "update-debian-chroot",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+        ]
+        update = parse_args(args=args).operation
+        update.backend.run = FailFirstTime()
+        self.assertEqual(0, update.run())
+
+        expected_args = [
+            ["-uy", "update"],
+            ["-uy", "update"],
+            [
+                "-o",
+                "DPkg::Options::=--force-confold",
+                "-uy",
+                "--purge",
+                "dist-upgrade",
+            ],
+        ]
+        self.assertThat(update.backend.run.calls, RanAptGet(expected_args))
+        self.assertEqual(
+            "Updating target for build 1\n"
+            "Waiting 15 seconds and trying again ...\n",
+            logger.output,
+        )
+        self.assertEqual(start_time + 15, time.time())
diff --git a/lpbuildd/target/tests/test_backend.py b/lpbuildd/target/tests/test_backend.py
new file mode 100644
index 0000000..1db7b55
--- /dev/null
+++ b/lpbuildd/target/tests/test_backend.py
@@ -0,0 +1,37 @@
+# Copyright 2022 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+from unittest.mock import ANY, patch
+
+from fixtures import TempDir
+from testtools import TestCase
+
+from lpbuildd.tests.fakebuilder import UncontainedBackend
+
+
+class TestBackend(TestCase):
+    def test_open(self):
+        backend = UncontainedBackend("1")
+        backend_root = self.useFixture(TempDir())
+        target_path = backend_root.join("test.txt")
+
+        with patch.object(
+            backend, "copy_in", wraps=backend.copy_in
+        ) as copy_in, patch.object(
+            backend, "copy_out", wraps=backend.copy_out
+        ) as copy_out:
+            with backend.open(target_path, "w") as f:
+                f.write("text")
+
+            copy_out.assert_not_called()
+            copy_in.assert_called_once_with(ANY, target_path)
+
+            self.assertTrue(backend.path_exists(target_path))
+
+            copy_in.reset_mock()
+            copy_out.reset_mock()
+
+            with backend.open(target_path, "r") as f:
+                self.assertEqual(f.read(), "text")
+
+            copy_in.assert_not_called()
+            copy_out.assert_called_once_with(target_path, ANY)
diff --git a/lpbuildd/target/tests/test_build_charm.py b/lpbuildd/target/tests/test_build_charm.py
new file mode 100644
index 0000000..9f98fa0
--- /dev/null
+++ b/lpbuildd/target/tests/test_build_charm.py
@@ -0,0 +1,764 @@
+# Copyright 2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import json
+import os
+import stat
+import subprocess
+from textwrap import dedent
+
+import responses
+from fixtures import FakeLogger, TempDir
+from systemfixtures import FakeFilesystem
+from testtools.matchers import AnyMatch, MatchesAll, MatchesListwise
+from testtools.testcase import TestCase
+
+from lpbuildd.target.backend import InvalidBuildFilePath
+from lpbuildd.target.build_charm import (
+    RETCODE_FAILURE_BUILD,
+    RETCODE_FAILURE_INSTALL,
+)
+from lpbuildd.target.cli import parse_args
+from lpbuildd.target.tests.matchers import (
+    RanAptGet,
+    RanBuildCommand,
+    RanCommand,
+)
+from lpbuildd.target.tests.test_build_snap import FakeRevisionID, RanSnap
+from lpbuildd.tests.fakebuilder import FakeMethod
+
+
+class TestBuildCharm(TestCase):
+    def test_run_build_command_no_env(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.run_build_command(["echo", "hello world"])
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["echo", "hello world"], cwd="/home/buildd/test-image"
+                    ),
+                ]
+            ),
+        )
+
+    def test_run_build_command_env(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.run_build_command(
+            ["echo", "hello world"], env={"FOO": "bar baz"}
+        )
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["echo", "hello world"],
+                        FOO="bar baz",
+                        cwd="/home/buildd/test-image",
+                    )
+                ]
+            ),
+        )
+
+    def test_install_channels(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--channel=core=candidate",
+            "--channel=core18=beta",
+            "--channel=charmcraft=edge",
+            "--branch",
+            "lp:foo",
+            "test-snap",
+        ]
+        build_snap = parse_args(args=args).operation
+        build_snap.install()
+        self.assertThat(
+            build_snap.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet(
+                        "install", "bzr", "python3-pip", "python3-setuptools"
+                    ),
+                    RanSnap("install", "--channel=candidate", "core"),
+                    RanSnap("install", "--channel=beta", "core18"),
+                    RanSnap(
+                        "install", "--classic", "--channel=edge", "charmcraft"
+                    ),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+
+    def test_install_bzr(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.install()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet(
+                        "install", "bzr", "python3-pip", "python3-setuptools"
+                    ),
+                    RanSnap("install", "--classic", "charmcraft"),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+
+    def test_install_git(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.install()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet(
+                        "install", "git", "python3-pip", "python3-setuptools"
+                    ),
+                    RanSnap("install", "--classic", "charmcraft"),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+
+    @responses.activate
+    def test_install_snap_store_proxy(self):
+        store_assertion = dedent(
+            """\
+            type: store
+            store: store-id
+            url: http://snap-store-proxy.example
+
+            body
+            """
+        )
+
+        def respond(request):
+            return 200, {"X-Assertion-Store-Id": "store-id"}, store_assertion
+
+        responses.add_callback(
+            "GET",
+            "http://snap-store-proxy.example/v2/auth/store/assertions",
+            callback=respond,
+        )
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--snap-store-proxy-url",
+            "http://snap-store-proxy.example/",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.install()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet(
+                        "install", "git", "python3-pip", "python3-setuptools"
+                    ),
+                    RanCommand(
+                        ["snap", "ack", "/dev/stdin"],
+                        input_text=store_assertion,
+                    ),
+                    RanCommand(
+                        ["snap", "set", "core", "proxy.store=store-id"]
+                    ),
+                    RanSnap("install", "--classic", "charmcraft"),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+
+    def test_install_proxy(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--proxy-url",
+            "http://proxy.example:3128/",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.bin = "/builderbin"
+        self.useFixture(FakeFilesystem()).add("/builderbin")
+        os.mkdir("/builderbin")
+        with open("/builderbin/lpbuildd-git-proxy", "w") as proxy_script:
+            proxy_script.write("proxy script\n")
+            os.fchmod(proxy_script.fileno(), 0o755)
+        build_charm.install()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet(
+                        "install",
+                        "python3",
+                        "socat",
+                        "git",
+                        "python3-pip",
+                        "python3-setuptools",
+                    ),
+                    RanSnap("install", "--classic", "charmcraft"),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+        self.assertEqual(
+            (b"proxy script\n", stat.S_IFREG | 0o755),
+            build_charm.backend.backend_fs[
+                "/usr/local/bin/lpbuildd-git-proxy"
+            ],
+        )
+
+    def test_repo_bzr(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.build_path = self.useFixture(TempDir()).path
+        build_charm.backend.run = FakeRevisionID("42")
+        build_charm.repo()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["bzr", "branch", "lp:foo", "test-image"],
+                        cwd="/home/buildd",
+                    ),
+                    RanBuildCommand(
+                        ["bzr", "revno"],
+                        cwd="/home/buildd/test-image",
+                        get_output=True,
+                        universal_newlines=True,
+                    ),
+                ]
+            ),
+        )
+        status_path = os.path.join(build_charm.backend.build_path, "status")
+        with open(status_path) as status:
+            self.assertEqual({"revision_id": "42"}, json.load(status))
+
+    def test_repo_git(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.build_path = self.useFixture(TempDir()).path
+        build_charm.backend.run = FakeRevisionID("0" * 40)
+        build_charm.repo()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["git", "clone", "-n", "lp:foo", "test-image"],
+                        cwd="/home/buildd",
+                    ),
+                    RanBuildCommand(
+                        ["git", "checkout", "-q", "HEAD"],
+                        cwd="/home/buildd/test-image",
+                    ),
+                    RanBuildCommand(
+                        [
+                            "git",
+                            "submodule",
+                            "update",
+                            "--init",
+                            "--recursive",
+                        ],
+                        cwd="/home/buildd/test-image",
+                    ),
+                    RanBuildCommand(
+                        ["git", "rev-parse", "HEAD^{}"],
+                        cwd="/home/buildd/test-image",
+                        get_output=True,
+                        universal_newlines=True,
+                    ),
+                ]
+            ),
+        )
+        status_path = os.path.join(build_charm.backend.build_path, "status")
+        with open(status_path) as status:
+            self.assertEqual({"revision_id": "0" * 40}, json.load(status))
+
+    def test_repo_git_with_path(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--git-path",
+            "next",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.build_path = self.useFixture(TempDir()).path
+        build_charm.backend.run = FakeRevisionID("0" * 40)
+        build_charm.repo()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["git", "clone", "-n", "lp:foo", "test-image"],
+                        cwd="/home/buildd",
+                    ),
+                    RanBuildCommand(
+                        ["git", "checkout", "-q", "next"],
+                        cwd="/home/buildd/test-image",
+                    ),
+                    RanBuildCommand(
+                        [
+                            "git",
+                            "submodule",
+                            "update",
+                            "--init",
+                            "--recursive",
+                        ],
+                        cwd="/home/buildd/test-image",
+                    ),
+                    RanBuildCommand(
+                        ["git", "rev-parse", "next^{}"],
+                        cwd="/home/buildd/test-image",
+                        get_output=True,
+                        universal_newlines=True,
+                    ),
+                ]
+            ),
+        )
+        status_path = os.path.join(build_charm.backend.build_path, "status")
+        with open(status_path) as status:
+            self.assertEqual({"revision_id": "0" * 40}, json.load(status))
+
+    def test_repo_git_with_tag_path(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--git-path",
+            "refs/tags/1.0",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.build_path = self.useFixture(TempDir()).path
+        build_charm.backend.run = FakeRevisionID("0" * 40)
+        build_charm.repo()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["git", "clone", "-n", "lp:foo", "test-image"],
+                        cwd="/home/buildd",
+                    ),
+                    RanBuildCommand(
+                        ["git", "checkout", "-q", "refs/tags/1.0"],
+                        cwd="/home/buildd/test-image",
+                    ),
+                    RanBuildCommand(
+                        [
+                            "git",
+                            "submodule",
+                            "update",
+                            "--init",
+                            "--recursive",
+                        ],
+                        cwd="/home/buildd/test-image",
+                    ),
+                    RanBuildCommand(
+                        ["git", "rev-parse", "refs/tags/1.0^{}"],
+                        cwd="/home/buildd/test-image",
+                        get_output=True,
+                        universal_newlines=True,
+                    ),
+                ]
+            ),
+        )
+        status_path = os.path.join(build_charm.backend.build_path, "status")
+        with open(status_path) as status:
+            self.assertEqual({"revision_id": "0" * 40}, json.load(status))
+
+    def test_repo_proxy(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--proxy-url",
+            "http://proxy.example:3128/",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.build_path = self.useFixture(TempDir()).path
+        build_charm.backend.run = FakeRevisionID("0" * 40)
+        build_charm.repo()
+        env = {
+            "http_proxy": "http://proxy.example:3128/",
+            "https_proxy": "http://proxy.example:3128/",
+            "GIT_PROXY_COMMAND": "/usr/local/bin/lpbuildd-git-proxy",
+            "SNAPPY_STORE_NO_CDN": "1",
+        }
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["git", "clone", "-n", "lp:foo", "test-image"],
+                        cwd="/home/buildd",
+                        **env,
+                    ),
+                    RanBuildCommand(
+                        ["git", "checkout", "-q", "HEAD"],
+                        cwd="/home/buildd/test-image",
+                        **env,
+                    ),
+                    RanBuildCommand(
+                        [
+                            "git",
+                            "submodule",
+                            "update",
+                            "--init",
+                            "--recursive",
+                        ],
+                        cwd="/home/buildd/test-image",
+                        **env,
+                    ),
+                    RanBuildCommand(
+                        ["git", "rev-parse", "HEAD^{}"],
+                        cwd="/home/buildd/test-image",
+                        get_output=True,
+                        universal_newlines=True,
+                    ),
+                ]
+            ),
+        )
+        status_path = os.path.join(build_charm.backend.build_path, "status")
+        with open(status_path) as status:
+            self.assertEqual({"revision_id": "0" * 40}, json.load(status))
+
+    def test_build(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.add_dir("/build/test-directory")
+        build_charm.build()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["charmcraft", "pack", "-v", "--destructive-mode"],
+                        cwd="/home/buildd/test-image/.",
+                    ),
+                ]
+            ),
+        )
+
+    def test_build_with_path(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "--build-path",
+            "build-aux/",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.add_dir("/build/test-directory")
+        build_charm.build()
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["charmcraft", "pack", "-v", "--destructive-mode"],
+                        cwd="/home/buildd/test-image/build-aux/",
+                    ),
+                ]
+            ),
+        )
+
+    def test_build_proxy(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "--proxy-url",
+            "http://proxy.example:3128/",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.build()
+        env = {
+            "http_proxy": "http://proxy.example:3128/",
+            "https_proxy": "http://proxy.example:3128/",
+            "GIT_PROXY_COMMAND": "/usr/local/bin/lpbuildd-git-proxy",
+            "SNAPPY_STORE_NO_CDN": "1",
+        }
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["charmcraft", "pack", "-v", "--destructive-mode"],
+                        cwd="/home/buildd/test-image/.",
+                        **env,
+                    ),
+                ]
+            ),
+        )
+
+    def test_run_succeeds(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.build_path = self.useFixture(TempDir()).path
+        build_charm.backend.run = FakeRevisionID("42")
+        self.assertEqual(0, build_charm.run())
+        self.assertThat(
+            build_charm.backend.run.calls,
+            MatchesAll(
+                AnyMatch(
+                    RanAptGet(
+                        "install", "bzr", "python3-pip", "python3-setuptools"
+                    ),
+                ),
+                AnyMatch(
+                    RanBuildCommand(
+                        ["bzr", "branch", "lp:foo", "test-image"],
+                        cwd="/home/buildd",
+                    )
+                ),
+                AnyMatch(
+                    RanBuildCommand(
+                        ["charmcraft", "pack", "-v", "--destructive-mode"],
+                        cwd="/home/buildd/test-image/.",
+                    )
+                ),
+            ),
+        )
+
+    def test_run_install_fails(self):
+        class FailInstall(FakeMethod):
+            def __call__(self, run_args, *args, **kwargs):
+                super().__call__(run_args, *args, **kwargs)
+                if run_args[0] == "apt-get":
+                    raise subprocess.CalledProcessError(1, run_args)
+
+        self.useFixture(FakeLogger())
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.run = FailInstall()
+        self.assertEqual(RETCODE_FAILURE_INSTALL, build_charm.run())
+
+    def test_run_repo_fails(self):
+        class FailRepo(FakeMethod):
+            def __call__(self, run_args, *args, **kwargs):
+                super().__call__(run_args, *args, **kwargs)
+                if run_args[:2] == ["bzr", "branch"]:
+                    raise subprocess.CalledProcessError(1, run_args)
+
+        self.useFixture(FakeLogger())
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.run = FailRepo()
+        self.assertEqual(RETCODE_FAILURE_BUILD, build_charm.run())
+
+    def test_run_build_fails(self):
+        class FailBuild(FakeMethod):
+            def __call__(self, run_args, *args, **kwargs):
+                super().__call__(run_args, *args, **kwargs)
+                if run_args[0] == "charmcraft":
+                    raise subprocess.CalledProcessError(1, run_args)
+
+        self.useFixture(FakeLogger())
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.build_path = self.useFixture(TempDir()).path
+        build_charm.backend.run = FailBuild()
+        self.assertEqual(RETCODE_FAILURE_BUILD, build_charm.run())
+
+    def test_build_with_invalid_build_path_parent(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "--build-path",
+            "../",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.add_dir("/build/test-directory")
+        self.assertRaises(InvalidBuildFilePath, build_charm.build)
+
+    def test_build_with_invalid_build_path_absolute(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "--build-path",
+            "/etc",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.backend.add_dir("/build/test-directory")
+        self.assertRaises(InvalidBuildFilePath, build_charm.build)
+
+    def test_build_with_invalid_build_path_symlink(self):
+        args = [
+            "build-charm",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "--build-path",
+            "build/",
+            "test-image",
+        ]
+        build_charm = parse_args(args=args).operation
+        build_charm.buildd_path = self.useFixture(TempDir()).path
+        os.symlink(
+            "/etc/hosts", os.path.join(build_charm.buildd_path, "build")
+        )
+        self.assertRaises(InvalidBuildFilePath, build_charm.build)
diff --git a/lpbuildd/target/tests/test_build_livefs.py b/lpbuildd/target/tests/test_build_livefs.py
new file mode 100644
index 0000000..ad220c3
--- /dev/null
+++ b/lpbuildd/target/tests/test_build_livefs.py
@@ -0,0 +1,442 @@
+# Copyright 2017-2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import subprocess
+from textwrap import dedent
+
+import responses
+from fixtures import FakeLogger
+from testtools import TestCase
+from testtools.matchers import AnyMatch, MatchesAll, MatchesListwise
+
+from lpbuildd.target.build_livefs import (
+    RETCODE_FAILURE_BUILD,
+    RETCODE_FAILURE_INSTALL,
+)
+from lpbuildd.target.cli import parse_args
+from lpbuildd.target.tests.matchers import (
+    RanAptGet,
+    RanBuildCommand,
+    RanCommand,
+)
+from lpbuildd.tests.fakebuilder import FakeMethod
+
+
+class TestBuildLiveFS(TestCase):
+    def test_install(self):
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.install()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet("install", "livecd-rootfs"),
+                ]
+            ),
+        )
+
+    def test_install_locale(self):
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--locale=zh_CN",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.install()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet("install", "livecd-rootfs"),
+                    RanAptGet(
+                        "--install-recommends",
+                        "install",
+                        "ubuntu-defaults-builder",
+                    ),
+                ]
+            ),
+        )
+
+    @responses.activate
+    def test_install_snap_store_proxy(self):
+        store_assertion = dedent(
+            """\
+            type: store
+            store: store-id
+            url: http://snap-store-proxy.example
+
+            body
+            """
+        )
+
+        def respond(request):
+            return 200, {"X-Assertion-Store-Id": "store-id"}, store_assertion
+
+        responses.add_callback(
+            "GET",
+            "http://snap-store-proxy.example/v2/auth/store/assertions",
+            callback=respond,
+        )
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--snap-store-proxy-url",
+            "http://snap-store-proxy.example/",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.install()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet("install", "livecd-rootfs"),
+                    RanCommand(
+                        ["snap", "ack", "/dev/stdin"],
+                        input_text=store_assertion,
+                    ),
+                    RanCommand(
+                        ["snap", "set", "core", "proxy.store=store-id"]
+                    ),
+                ]
+            ),
+        )
+
+    def test_build(self):
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--project=ubuntu",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.build()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(["rm", "-rf", "auto", "local"]),
+                    RanBuildCommand(["mkdir", "-p", "auto"]),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/config",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/build",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/clean",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(["lb", "clean", "--purge"]),
+                    RanBuildCommand(
+                        ["lb", "config"],
+                        PROJECT="ubuntu",
+                        ARCH="amd64",
+                        SUITE="xenial",
+                    ),
+                    RanBuildCommand(
+                        ["lb", "build"], PROJECT="ubuntu", ARCH="amd64"
+                    ),
+                ]
+            ),
+        )
+
+    def test_build_locale(self):
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--locale=zh_CN",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.build()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        [
+                            "ubuntu-defaults-image",
+                            "--locale",
+                            "zh_CN",
+                            "--arch",
+                            "amd64",
+                            "--release",
+                            "xenial",
+                        ]
+                    ),
+                ]
+            ),
+        )
+
+    def test_build_extra_ppas_and_snaps(self):
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--project=ubuntu-core",
+            "--extra-ppa=owner1/name1",
+            "--extra-ppa=owner2/name2",
+            "--extra-snap=snap1",
+            "--extra-snap=snap2",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.build()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(["rm", "-rf", "auto", "local"]),
+                    RanBuildCommand(["mkdir", "-p", "auto"]),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/config",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/build",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/clean",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(["lb", "clean", "--purge"]),
+                    RanBuildCommand(
+                        ["lb", "config"],
+                        PROJECT="ubuntu-core",
+                        ARCH="amd64",
+                        SUITE="xenial",
+                        EXTRA_PPAS="owner1/name1 owner2/name2",
+                        EXTRA_SNAPS="snap1 snap2",
+                    ),
+                    RanBuildCommand(
+                        ["lb", "build"], PROJECT="ubuntu-core", ARCH="amd64"
+                    ),
+                ]
+            ),
+        )
+
+    def test_build_debug(self):
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--project=ubuntu",
+            "--debug",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.build()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(["rm", "-rf", "auto", "local"]),
+                    RanBuildCommand(["mkdir", "-p", "auto"]),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/config",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/build",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/clean",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(["mkdir", "-p", "local/functions"]),
+                    RanBuildCommand(
+                        ["sh", "-c", "echo 'set -x' >local/functions/debug.sh"]
+                    ),
+                    RanBuildCommand(["lb", "clean", "--purge"]),
+                    RanBuildCommand(
+                        ["lb", "config"],
+                        PROJECT="ubuntu",
+                        ARCH="amd64",
+                        SUITE="xenial",
+                    ),
+                    RanBuildCommand(
+                        ["lb", "build"], PROJECT="ubuntu", ARCH="amd64"
+                    ),
+                ]
+            ),
+        )
+
+    def test_build_with_http_proxy(self):
+        proxy = "http://example.com:8000"
+        expected_env = {
+            "PROJECT": "ubuntu-cpc",
+            "ARCH": "amd64",
+            "http_proxy": proxy,
+            "LB_APT_HTTP_PROXY": proxy,
+        }
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--project=ubuntu-cpc",
+            f"--http-proxy={proxy}",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.build()
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(["rm", "-rf", "auto", "local"]),
+                    RanBuildCommand(["mkdir", "-p", "auto"]),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/config",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/build",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(
+                        [
+                            "ln",
+                            "-s",
+                            "/usr/share/livecd-rootfs/live-build/auto/clean",
+                            "auto/",
+                        ]
+                    ),
+                    RanBuildCommand(["lb", "clean", "--purge"]),
+                    RanBuildCommand(
+                        ["lb", "config"], SUITE="xenial", **expected_env
+                    ),
+                    RanBuildCommand(["lb", "build"], **expected_env),
+                ]
+            ),
+        )
+
+    def test_run_succeeds(self):
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--project=ubuntu",
+        ]
+        build_livefs = parse_args(args=args).operation
+        self.assertEqual(0, build_livefs.run())
+        self.assertThat(
+            build_livefs.backend.run.calls,
+            MatchesAll(
+                AnyMatch(RanAptGet("install", "livecd-rootfs")),
+                AnyMatch(
+                    RanBuildCommand(
+                        ["lb", "build"], PROJECT="ubuntu", ARCH="amd64"
+                    )
+                ),
+            ),
+        )
+
+    def test_run_install_fails(self):
+        class FailInstall(FakeMethod):
+            def __call__(self, run_args, *args, **kwargs):
+                super().__call__(run_args, *args, **kwargs)
+                if run_args[0] == "apt-get":
+                    raise subprocess.CalledProcessError(1, run_args)
+
+        self.useFixture(FakeLogger())
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--project=ubuntu",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.backend.run = FailInstall()
+        self.assertEqual(RETCODE_FAILURE_INSTALL, build_livefs.run())
+
+    def test_run_build_fails(self):
+        class FailBuild(FakeMethod):
+            def __call__(self, run_args, *args, **kwargs):
+                super().__call__(run_args, *args, **kwargs)
+                if run_args[0] == "rm":
+                    raise subprocess.CalledProcessError(1, run_args)
+
+        self.useFixture(FakeLogger())
+        args = [
+            "buildlivefs",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--project=ubuntu",
+        ]
+        build_livefs = parse_args(args=args).operation
+        build_livefs.backend.run = FailBuild()
+        self.assertEqual(RETCODE_FAILURE_BUILD, build_livefs.run())
diff --git a/lpbuildd/target/tests/test_build_oci.py b/lpbuildd/target/tests/test_build_oci.py
new file mode 100644
index 0000000..116d343
--- /dev/null
+++ b/lpbuildd/target/tests/test_build_oci.py
@@ -0,0 +1,901 @@
+# Copyright 2019 Canonical Ltd.  This software is licensed under the
+# GNU Affero General Public License version 3 (see the file LICENSE).
+
+import os.path
+import stat
+import subprocess
+from textwrap import dedent
+
+import responses
+from fixtures import FakeLogger, TempDir
+from systemfixtures import FakeFilesystem
+from testtools import TestCase
+from testtools.matchers import AnyMatch, MatchesAll, MatchesListwise
+
+from lpbuildd.target.backend import InvalidBuildFilePath
+from lpbuildd.target.build_oci import (
+    RETCODE_FAILURE_BUILD,
+    RETCODE_FAILURE_INSTALL,
+)
+from lpbuildd.target.cli import parse_args
+from lpbuildd.target.tests.matchers import (
+    RanAptGet,
+    RanBuildCommand,
+    RanCommand,
+)
+from lpbuildd.tests.fakebuilder import FakeMethod
+
+
+class TestBuildOCI(TestCase):
+    def test_run_build_command_no_env(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.run_build_command(["echo", "hello world"])
+        self.assertThat(
+            build_oci.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["echo", "hello world"], cwd="/home/buildd/test-image"
+                    ),
+                ]
+            ),
+        )
+
+    def test_run_build_command_env(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.run_build_command(
+            ["echo", "hello world"], env={"FOO": "bar baz"}
+        )
+        self.assertThat(
+            build_oci.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["echo", "hello world"],
+                        FOO="bar baz",
+                        cwd="/home/buildd/test-image",
+                    )
+                ]
+            ),
+        )
+
+    def test_install_bzr(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.install()
+        self.assertThat(
+            build_oci.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet("install", "bzr", "docker.io"),
+                    RanCommand(["systemctl", "restart", "docker"]),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+
+    def test_install_git(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.install()
+        self.assertThat(
+            build_oci.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet("install", "git", "docker.io"),
+                    RanCommand(["systemctl", "restart", "docker"]),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+
+    @responses.activate
+    def test_install_snap_store_proxy(self):
+        store_assertion = dedent(
+            """\
+            type: store
+            store: store-id
+            url: http://snap-store-proxy.example
+
+            body
+            """
+        )
+
+        def respond(request):
+            return 200, {"X-Assertion-Store-Id": "store-id"}, store_assertion
+
+        responses.add_callback(
+            "GET",
+            "http://snap-store-proxy.example/v2/auth/store/assertions",
+            callback=respond,
+        )
+        args = [
+            "buildsnap",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--snap-store-proxy-url",
+            "http://snap-store-proxy.example/",
+            "test-snap",
+        ]
+        build_snap = parse_args(args=args).operation
+        build_snap.install()
+        self.assertThat(
+            build_snap.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanAptGet("install", "git", "snapcraft"),
+                    RanCommand(
+                        ["snap", "ack", "/dev/stdin"],
+                        input_text=store_assertion,
+                    ),
+                    RanCommand(
+                        ["snap", "set", "core", "proxy.store=store-id"]
+                    ),
+                ]
+            ),
+        )
+
+    def test_install_proxy(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--proxy-url",
+            "http://proxy.example:3128/",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.bin = "/builderbin"
+        self.useFixture(FakeFilesystem()).add("/builderbin")
+        os.mkdir("/builderbin")
+        with open("/builderbin/lpbuildd-git-proxy", "w") as proxy_script:
+            proxy_script.write("proxy script\n")
+            os.fchmod(proxy_script.fileno(), 0o755)
+        build_oci.install()
+        self.assertThat(
+            build_oci.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanCommand(
+                        ["mkdir", "-p", "/etc/systemd/system/docker.service.d"]
+                    ),
+                    RanAptGet(
+                        "install", "python3", "socat", "git", "docker.io"
+                    ),
+                    RanCommand(["systemctl", "restart", "docker"]),
+                    RanCommand(["mkdir", "-p", "/home/buildd"]),
+                ]
+            ),
+        )
+        self.assertEqual(
+            (b"proxy script\n", stat.S_IFREG | 0o755),
+            build_oci.backend.backend_fs["/usr/local/bin/lpbuildd-git-proxy"],
+        )
+
+    def test_repo_bzr(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--branch",
+            "lp:foo",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.backend.build_path = self.useFixture(TempDir()).path
+        build_oci.backend.run = FakeMethod()
+        build_oci.repo()
+        self.assertThat(
+            build_oci.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        ["bzr", "branch", "lp:foo", "test-image"],
+                        cwd="/home/buildd",
+                    ),
+                ]
+            ),
+        )
+
+    def test_repo_git(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.backend.build_path = self.useFixture(TempDir()).path
+        build_oci.backend.run = FakeMethod()
+        build_oci.repo()
+        self.assertThat(
+            build_oci.backend.run.calls,
+            MatchesListwise(
+                [
+                    RanBuildCommand(
+                        [
+                            "git",
+                            "clone",
+                            "-n",
+                            "--depth",
+                            "1",
+                            "--no-single-branch",
+                            "lp:foo",
+                            "test-image",
+                        ],
+                        cwd="/home/buildd",
+                    ),
+                    RanBuildCommand(
+                        ["git", "checkout", "-q", "HEAD"],
+                        cwd="/home/buildd/test-image",
+                    ),
+                    RanBuildCommand(
+                        [
+                            "git",
+                            "submodule",
+                            "update",
+                            "--init",
+                            "--recursive",
+                        ],
+                        cwd="/home/buildd/test-image",
+                    ),
+                ]
+            ),
+        )
+
+    def test_repo_git_with_path(self):
+        args = [
+            "build-oci",
+            "--backend=fake",
+            "--series=xenial",
+            "--arch=amd64",
+            "1",
+            "--git-repository",
+            "lp:foo",
+            "--git-path",
+            "next",
+            "test-image",
+        ]
+        build_oci = parse_args(args=args).operation
+        build_oci.backend.build_path = self.useFixture(TempDir(

Follow ups