← Back to team overview

duplicity-team team mailing list archive

[Merge] lp:~kevinoid/duplicity/windows-port into lp:duplicity/0.6-series

 

Kevin Locke has proposed merging lp:~kevinoid/duplicity/windows-port into lp:duplicity/0.6-series.

Requested reviews:
  duplicity-team (duplicity-team)
Related bugs:
  #451582 Wishlist: Native Windows Support
  https://bugs.launchpad.net/bugs/451582
  #637556 os.execve should get passed program as first argument
  https://bugs.launchpad.net/bugs/637556


This branch includes changes to support running Duplicity natively on Windows, as requested in bug 451582.  I have done my best to separate out each change into logical units for commits and provide a detailed explanation and rationale for each change in the commit message.

The current work is only intended to port the main functionality of Duplicity and the local backend.  The other backends have not been tested (and several, particularly ssh, are known not to work on Windows).

Most of the commits should not change any of the functionality of Duplicity.  However, you may wish to take particular notice of revisions 677 and 698, which do introduce functionality changes.

Note:  Revision 682 added some portability improvements for the restart process and fixed bug 637556 in the process.
-- 
https://code.launchpad.net/~kevinoid/duplicity/windows-port/+merge/39287
Your team duplicity-team is requested to review the proposed merge of lp:~kevinoid/duplicity/windows-port into lp:duplicity/0.6-series.
=== modified file 'dist/makedist'
--- dist/makedist	2010-07-22 19:15:11 +0000
+++ dist/makedist	2010-10-25 15:49:45 +0000
@@ -20,14 +20,14 @@
 # along with duplicity; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
-import os, re, shutil, time, sys
+import os, re, shutil, subprocess, tarfile, traceback, time, sys, zipfile
 
 SourceDir = "duplicity"
 DistDir = "dist"
 
 # Various details about the files must also be specified by the rpm
 # spec template.
-spec_template = "dist/duplicity.spec.template"
+spec_template = os.path.join("dist", "duplicity.spec.template")
 
 def VersionedCopy(source, dest):
     """
@@ -48,14 +48,13 @@
     fout.write(outbuf)
     assert not fout.close()
 
-def MakeTar():
+def MakeArchives():
     """Create duplicity tar file"""
     tardir = "duplicity-%s" % Version
-    tarfile = "duplicity-%s.tar.gz" % Version
-    try:
-        os.lstat(tardir)
-        os.system("rm -rf " + tardir)
-    except OSError: pass
+    tarname = "duplicity-%s.tar.gz" % Version
+    zipname = "duplicity-%s.zip" % Version
+    if os.path.exists(tardir):
+        shutil.rmtree(tardir)
 
     os.mkdir(tardir)
     for filename in [
@@ -66,10 +65,10 @@
         "LOG-README",
         "README",
         "tarfile-LICENSE",
-        SourceDir + "/_librsyncmodule.c",
-        DistDir + "/setup.py",
+        os.path.join(SourceDir, "_librsyncmodule.c"),
+        os.path.join(DistDir, "setup.py"),
         ]:
-        assert not os.system("cp %s %s" % (filename, tardir)), filename
+        shutil.copy(filename, tardir)
 
     os.mkdir(os.path.join(tardir, "src"))
     for filename in [
@@ -104,39 +103,46 @@
         "urlparse_2_5.py",
         "util.py",
         ]:
-        assert not os.system("cp %s/%s %s/src" % (SourceDir, filename, tardir)), filename
+        shutil.copy(os.path.join(SourceDir, filename),
+                    os.path.join(tardir, "src"))
 
     os.mkdir(os.path.join(tardir, "src", "backends"))
     for filename in [
-        "backends/botobackend.py",
-        "backends/cloudfilesbackend.py",
-        "backends/ftpbackend.py",
-        "backends/giobackend.py",
-        "backends/hsibackend.py",
-        "backends/imapbackend.py",
-        "backends/__init__.py",
-        "backends/localbackend.py",
-        "backends/rsyncbackend.py",
-        "backends/sshbackend.py",
-        "backends/tahoebackend.py",
-        "backends/webdavbackend.py",
+        "botobackend.py",
+        "cloudfilesbackend.py",
+        "ftpbackend.py",
+        "giobackend.py",
+        "hsibackend.py",
+        "imapbackend.py",
+        "__init__.py",
+        "localbackend.py",
+        "rsyncbackend.py",
+        "sshbackend.py",
+        "tahoebackend.py",
+        "webdavbackend.py",
         ]:
-        assert not os.system("cp %s/%s %s/src/backends" %
-                             (SourceDir, filename, tardir)), filename
+        shutil.copy(os.path.join(SourceDir, "backends", filename),
+                    os.path.join(tardir, "src", "backends"))
+
+    if subprocess.call([sys.executable, "update-pot.py"], cwd="po") != 0:
+        sys.stderr.write("update-pot.py failed, translation files not updated!\n")
 
     os.mkdir(os.path.join(tardir, "po"))
-    assert not os.system("cd po && ./update-pot")
     for filename in [
         "duplicity.pot",
         ]:
-        assert not os.system("cp po/%s %s/po" % (filename, tardir)), filename
-    linguas = open('po/LINGUAS')
+        shutil.copy(os.path.join("po", filename), os.path.join(tardir, "po"))
+    linguas = open(os.path.join("po", "LINGUAS"))
     for line in linguas:
         langs = line.split()
         for lang in langs:
             assert not os.mkdir(os.path.join(tardir, "po", lang)), lang
-            assert not os.system("cp po/%s.po %s/po/%s" % (lang, tardir, lang)), lang
-            assert not os.system("msgfmt po/%s.po -o %s/po/%s/duplicity.mo" % (lang, tardir, lang)), lang
+            shutil.copy(os.path.join("po", lang + ".po"),
+                        os.path.join(tardir, "po", lang))
+            if os.system("msgfmt %s -o %s" %
+                                 (os.path.join("po", lang + ".po"),
+                                  os.path.join(tardir, "po", lang, "duplicity.mo"))) != 0:
+                sys.stderr.write("Translation for " + lang + " NOT updated!\n")
     linguas.close()
 
     VersionedCopy(os.path.join(SourceDir, "globals.py"),
@@ -154,9 +160,26 @@
 
     os.chmod(os.path.join(tardir, "setup.py"), 0755)
     os.chmod(os.path.join(tardir, "rdiffdir"), 0644)
-    os.system("tar -czf %s %s" % (tarfile, tardir))
+
+    with tarfile.open(tarname, "w:gz") as tar:
+        tar.add(tardir)
+
+    def add_dir_to_zip(dir, zip, clen=None):
+        if clen == None:
+            clen = len(dir)
+
+        for entry in os.listdir(dir):
+            entrypath = os.path.join(dir, entry)
+            if os.path.isdir(entrypath):
+                add_dir_to_zip(entrypath, zip, clen)
+            else:
+                zip.write(entrypath, entrypath[clen:])
+
+    with zipfile.ZipFile(zipname, "w") as zip:
+        add_dir_to_zip(tardir, zip)
+
     shutil.rmtree(tardir)
-    return tarfile
+    return (tarname, zipname)
 
 def MakeSpecFile():
     """Create spec file using spec template"""
@@ -166,8 +189,8 @@
 
 def Main():
     print "Processing version " + Version
-    tarfile = MakeTar()
-    print "Made tar file " + tarfile
+    archives = MakeArchives()
+    print "Made archives: %s" % (archives,)
     specfile = MakeSpecFile()
     print "Made specfile " + specfile
 

=== modified file 'dist/setup.py'
--- dist/setup.py	2010-10-06 14:37:22 +0000
+++ dist/setup.py	2010-10-25 15:49:45 +0000
@@ -31,16 +31,15 @@
 
 incdir_list = libdir_list = None
 
-if os.name == 'posix':
-    LIBRSYNC_DIR = os.environ.get('LIBRSYNC_DIR', '')
-    args = sys.argv[:]
-    for arg in args:
-        if arg.startswith('--librsync-dir='):
-            LIBRSYNC_DIR = arg.split('=')[1]
-            sys.argv.remove(arg)
-    if LIBRSYNC_DIR:
-        incdir_list = [os.path.join(LIBRSYNC_DIR, 'include')]
-        libdir_list = [os.path.join(LIBRSYNC_DIR, 'lib')]
+LIBRSYNC_DIR = os.environ.get('LIBRSYNC_DIR', '')
+args = sys.argv[:]
+for arg in args:
+    if arg.startswith('--librsync-dir='):
+        LIBRSYNC_DIR = arg.split('=')[1]
+        sys.argv.remove(arg)
+if LIBRSYNC_DIR:
+    incdir_list = [os.path.join(LIBRSYNC_DIR, 'include')]
+    libdir_list = [os.path.join(LIBRSYNC_DIR, 'lib')]
 
 data_files = [('share/man/man1',
                ['duplicity.1',

=== modified file 'duplicity-bin'
--- duplicity-bin	2010-08-26 13:01:10 +0000
+++ duplicity-bin	2010-10-25 15:49:45 +0000
@@ -28,11 +28,24 @@
 # any suggestions.
 
 import getpass, gzip, os, sys, time, types
-import traceback, platform, statvfs, resource, re
+import traceback, platform, statvfs, re
 
 import gettext
 gettext.install('duplicity')
 
+try:
+    import resource
+    have_resource = True
+except ImportError:
+    have_resource = False
+
+if sys.platform == "win32":
+    import ctypes
+    import ctypes.util
+    # Not to be confused with Python's msvcrt module which wraps part of msvcrt
+    # Note:  Load same msvcrt as Python to avoid cross-CRT problems
+    ctmsvcrt = ctypes.cdll[ctypes.util.find_msvcrt()]
+
 from duplicity import log
 log.setup()
 
@@ -554,7 +567,10 @@
     @param col_stats: collection status
     """
     if globals.restore_dir:
-        index = tuple(globals.restore_dir.split("/"))
+        index = path.split_all(globals.restore_dir)
+        if index[-1] == "":
+            del index[-1]
+        index = tuple(index)
     else:
         index = ()
     time = globals.restore_time or dup_time.curtime
@@ -994,16 +1010,13 @@
         # First check disk space in temp area.
         tempfile, tempname = tempdir.default().mkstemp()
         os.close(tempfile)
+
         # strip off the temp dir and file
-        tempfs = os.path.sep.join(tempname.split(os.path.sep)[:-2])
-        try:
-            stats = os.statvfs(tempfs)
-        except:
-            log.FatalError(_("Unable to get free space on temp."),
-                           log.ErrorCode.get_freespace_failed)
+        tempfs = os.path.split(os.path.split(tempname)[0])[0]
+
         # Calculate space we need for at least 2 volumes of full or inc
         # plus about 30% of one volume for the signature files.
-        freespace = stats[statvfs.F_FRSIZE] * stats[statvfs.F_BAVAIL]
+        freespace = get_free_space(tempfs)
         needspace = (((globals.async_concurrency + 1) * globals.volsize)
                      + int(0.30 * globals.volsize))
         if freespace < needspace:
@@ -1015,16 +1028,82 @@
 
         # Some environments like Cygwin run with an artificially
         # low value for max open files.  Check for safe number.
+        check_resource_limits()
+
+
+def check_resource_limits():
+    """
+    Check for sufficient resource limits:
+      - enough max open files
+    Attempt to increase limits to sufficient values if insufficient
+    Put out fatal error if not sufficient to run
+
+    Requires the resource module
+
+    @rtype: void
+    @return: void
+    """
+    if have_resource:
         try:
             soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
         except resource.error:
             log.FatalError(_("Unable to get max open files."),
                            log.ErrorCode.get_ulimit_failed)
-        maxopen = min([l for l in (soft, hard) if l > -1])
-        if maxopen < 1024:
-            log.FatalError(_("Max open files of %s is too low, should be >= 1024.\n"
-                             "Use 'ulimit -n 1024' or higher to correct.\n") % (maxopen,),
-                             log.ErrorCode.maxopen_too_low)
+
+        if soft > -1 and soft < 1024 and soft < hard:
+            try:
+                newsoft = min(1024, hard)
+                resource.setrlimit(resource.RLIMIT_NOFILE, (newsoft, hard))
+                soft = newsoft
+            except resource.error:
+                pass
+    elif sys.platform == "win32":
+        # 2048 from http://msdn.microsoft.com/en-us/library/6e3b887c.aspx
+        soft, hard = ctmsvcrt._getmaxstdio(), 2048
+
+        if soft < 1024:
+            newsoft = ctmsvcrt._setmaxstdio(1024)
+            if newsoft > -1:
+                soft = newsoft
+    else:
+        log.FatalError(_("Unable to get max open files."),
+                       log.ErrorCode.get_ulimit_failed)
+
+    maxopen = min([l for l in (soft, hard) if l > -1])
+    if maxopen < 1024:
+        log.FatalError(_("Max open files of %s is too low, should be >= 1024.\n"
+                         "Use 'ulimit -n 1024' or higher to correct.\n") % (maxopen,),
+                         log.ErrorCode.maxopen_too_low)
+
+
+def get_free_space(dir):
+    """
+    Get the free space available in a given directory
+
+    @type dir: string
+    @param dir: directory in which to measure free space
+
+    @rtype: int
+    @return: amount of free space on the filesystem containing dir (in bytes)
+    """
+    if hasattr(os, "statvfs"):
+        try:
+            stats = os.statvfs(dir)
+        except:
+            log.FatalError(_("Unable to get free space on temp."),
+                           log.ErrorCode.get_freespace_failed)
+
+        return stats[statvfs.F_FRSIZE] * stats[statvfs.F_BAVAIL]
+    elif sys.platform == "win32":
+        freespaceull = ctypes.c_ulonglong(0)
+        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(dir),
+                None, None, ctypes.pointer(freespaceull))
+
+        return freespaceull.value
+    else:
+        log.FatalError(_("Unable to get free space on temp."),
+                       log.ErrorCode.get_freespace_failed)
+
 
 def log_startup_parms(verbosity=log.INFO):
     """
@@ -1071,7 +1150,7 @@
                 log.Notice(_("RESTART: The first volume failed to upload before termination.\n"
                              "         Restart is impossible...starting backup from beginning."))
                 self.last_backup.delete()
-                os.execve(sys.argv[0], sys.argv[1:], os.environ)
+                self.execv(sys.executable, sys.argv)
             elif mf_len - self.start_vol > 0:
                 # upload of N vols failed, fix manifest and restart
                 log.Notice(_("RESTART: Volumes %d to %d failed to upload before termination.\n"
@@ -1087,7 +1166,7 @@
                              "         backup then restart the backup from the beginning.") %
                              (mf_len, self.start_vol))
                 self.last_backup.delete()
-                os.execve(sys.argv[0], sys.argv[1:], os.environ)
+                self.execv(sys.executable, sys.argv)
 
 
     def setLastSaved(self, mf):
@@ -1112,7 +1191,7 @@
 
     # if python is run setuid, it's only partway set,
     # so make sure to run with euid/egid of root
-    if os.geteuid() == 0:
+    if hasattr(os, "geteuid") and os.geteuid() == 0:
         # make sure uid/gid match euid/egid
         os.setuid(os.geteuid())
         os.setgid(os.getegid())

=== modified file 'duplicity.1'
--- duplicity.1	2010-08-26 14:11:14 +0000
+++ duplicity.1	2010-10-25 15:49:45 +0000
@@ -871,14 +871,16 @@
 .BR [...] .
 As in a normal shell,
 .B *
-can be expanded to any string of characters not containing "/",
+can be expanded to any string of characters not containing a directory
+separator,
 .B ?
-expands to any character except "/", and
+expands to any character except a directory separator, and
 .B [...]
 expands to a single character of those characters specified (ranges
 are acceptable).  The new special pattern,
 .BR ** ,
-expands to any string of characters whether or not it contains "/".
+expands to any string of characters whether or not it contains a
+directory separator.
 Furthermore, if the pattern starts with "ignorecase:" (case
 insensitive), then this prefix will be removed and any character in
 the string can be replaced with an upper- or lowercase version of

=== modified file 'duplicity/GnuPGInterface.py'
--- duplicity/GnuPGInterface.py	2010-07-22 19:15:11 +0000
+++ duplicity/GnuPGInterface.py	2010-10-25 15:49:45 +0000
@@ -220,42 +220,55 @@
 or see http://www.gnu.org/copyleft/lesser.html
 """
 
+import errno
 import os
+import subprocess
 import sys
-import fcntl
-
-from duplicity import log
-
-try:
-    import threading
+
+if sys.platform == "win32":
+    # Required windows-only imports
+    import msvcrt
+    import _subprocess
+
+# Define next function for Python pre-2.6
+try:
+    next
+except NameError:
+    def next(itr):
+        return itr.next()
+
+try:
+    import fcntl
 except ImportError:
-    import dummy_threading #@UnusedImport
-    log.Warn("Threading not available -- zombie processes may appear")
+    # import success/failure is checked before use
+    pass
 
 __author__   = "Frank J. Tobin, ftobin@xxxxxxxxxxxxxxx"
 __version__  = "0.3.2"
-__revision__ = "$Id: GnuPGInterface.py,v 1.6 2009/06/06 17:35:19 loafman Exp $"
+__revision__ = "$Id$"
 
 # "standard" filehandles attached to processes
 _stds = [ 'stdin', 'stdout', 'stderr' ]
 
 # the permissions each type of fh needs to be opened with
-_fd_modes = { 'stdin':      'w',
-              'stdout':     'r',
-              'stderr':     'r',
-              'passphrase': 'w',
-              'command':    'w',
-              'logger':     'r',
-              'status':     'r'
+_fd_modes = { 'stdin':      'wb',
+              'stdout':     'rb',
+              'stderr':     'rb',
+              'passphrase': 'wb',
+              'attribute':  'rb',
+              'command':    'wb',
+              'logger':     'rb',
+              'status':     'rb'
               }
 
 # correlation between handle names and the arguments we'll pass
 _fd_options = { 'passphrase': '--passphrase-fd',
                 'logger':     '--logger-fd',
                 'status':     '--status-fd',
-                'command':    '--command-fd' }
+                'command':    '--command-fd',
+                'attribute':  '--attribute-fd' }
 
-class GnuPG:
+class GnuPG(object):
     """Class instances represent GnuPG.
 
     Instance attributes of a GnuPG object are:
@@ -276,6 +289,8 @@
       the command-line options used when calling GnuPG.
     """
 
+    __slots__ = ['call', 'passphrase', 'options']
+
     def __init__(self):
         self.call = 'gpg'
         self.passphrase = None
@@ -349,14 +364,14 @@
         if attach_fhs == None: attach_fhs = {}
 
         for std in _stds:
-            if not attach_fhs.has_key(std) \
+            if std not in attach_fhs \
                and std not in create_fhs:
                 attach_fhs.setdefault(std, getattr(sys, std))
 
         handle_passphrase = 0
 
         if self.passphrase != None \
-           and not attach_fhs.has_key('passphrase') \
+           and 'passphrase' not in attach_fhs \
            and 'passphrase' not in create_fhs:
             handle_passphrase = 1
             create_fhs.append('passphrase')
@@ -366,7 +381,10 @@
 
         if handle_passphrase:
             passphrase_fh = process.handles['passphrase']
-            passphrase_fh.write( self.passphrase )
+            if sys.version_info >= (3, 0) and isinstance(self.passphrase, str):
+                passphrase_fh.write( self.passphrase.encode() )
+            else:
+                passphrase_fh.write( self.passphrase )
             passphrase_fh.close()
             del process.handles['passphrase']
 
@@ -379,45 +397,40 @@
 
         process = Process()
 
-        for fh_name in create_fhs + attach_fhs.keys():
-            if not _fd_modes.has_key(fh_name):
-                raise KeyError, \
-                      "unrecognized filehandle name '%s'; must be one of %s" \
-                      % (fh_name, _fd_modes.keys())
+        for fh_name in create_fhs + list(attach_fhs.keys()):
+            if fh_name not in _fd_modes:
+                raise KeyError("unrecognized filehandle name '%s'; must be one of %s" \
+                      % (fh_name, list(_fd_modes.keys())))
 
         for fh_name in create_fhs:
             # make sure the user doesn't specify a filehandle
             # to be created *and* attached
-            if attach_fhs.has_key(fh_name):
-                raise ValueError, \
-                      "cannot have filehandle '%s' in both create_fhs and attach_fhs" \
-                      % fh_name
+            if fh_name in attach_fhs:
+                raise ValueError("cannot have filehandle '%s' in both create_fhs and attach_fhs" \
+                      % fh_name)
 
             pipe = os.pipe()
             # fix by drt@xxxxxxxxxxxxx noting
             # that since pipes are unidirectional on some systems,
             # so we have to 'turn the pipe around'
             # if we are writing
-            if _fd_modes[fh_name] == 'w': pipe = (pipe[1], pipe[0])
+            if _fd_modes[fh_name][0] == 'w': pipe = (pipe[1], pipe[0])
+
+            # Close the parent end in child to prevent deadlock
+            if "fcntl" in globals():
+                fcntl.fcntl(pipe[0], fcntl.F_SETFD, fcntl.FD_CLOEXEC)
+
             process._pipes[fh_name] = Pipe(pipe[0], pipe[1], 0)
 
         for fh_name, fh in attach_fhs.items():
             process._pipes[fh_name] = Pipe(fh.fileno(), fh.fileno(), 1)
 
-        process.pid = os.fork()
-        if process.pid != 0:
-            # start a threaded_waitpid on the child
-            process.thread = threading.Thread(target=threaded_waitpid,
-                                              name="wait%d" % process.pid,
-                                              args=(process,))
-            process.thread.start()
-
-        if process.pid == 0: self._as_child(process, gnupg_commands, args)
-        return self._as_parent(process)
-
-
-    def _as_parent(self, process):
-        """Stuff run after forking in parent"""
+        self._launch_process(process, gnupg_commands, args)
+        return self._handle_pipes(process)
+
+
+    def _handle_pipes(self, process):
+        """Deal with pipes after the child process has been created"""
         for k, p in process._pipes.items():
             if not p.direct:
                 os.close(p.child)
@@ -428,43 +441,137 @@
 
         return process
 
-
-    def _as_child(self, process, gnupg_commands, args):
-        """Stuff run after forking in child"""
-        # child
-        for std in _stds:
-            p = process._pipes[std]
-            os.dup2( p.child, getattr(sys, "__%s__" % std).fileno() )
-
-        for k, p in process._pipes.items():
-            if p.direct and k not in _stds:
-                # we want the fh to stay open after execing
-                fcntl.fcntl( p.child, fcntl.F_SETFD, 0 )
-
+    def _create_preexec_fn(self, process):
+        """Create and return a function to do cleanup before exec
+
+        The cleanup function will close all file descriptors which are not
+        needed by the child process.  This is required to prevent unnecessary
+        blocking on the final read of pipes not set FD_CLOEXEC due to gpg
+        inheriting an open copy of the input end of the pipe.  This can cause
+        delays in unrelated parts of the program or deadlocks in the case that
+        one end of the pipe is passed to attach_fds.
+
+        FIXME:  There is a race condition where a pipe can be created in
+        another thread after this function runs before exec is called and it
+        will not be closed.  This race condition will remain until a better
+        way to avoid closing the error pipe created by submodule is identified.
+        """
+        if sys.platform == "win32":
+            return None     # No cleanup necessary
+
+        try:
+            MAXFD = os.sysconf("SC_OPEN_MAX")
+            if MAXFD == -1:
+                MAXFD = 256
+        except:
+            MAXFD = 256
+
+        # Get list of fds to close now, so we don't close the error pipe
+        # created by submodule for reporting exec errors
+        child_fds = [p.child for p in process._pipes.values()]
+        child_fds.sort()
+        child_fds.append(MAXFD) # Sentinel value, simplifies code greatly
+
+        child_fds_iter = iter(child_fds)
+        child_fd = next(child_fds_iter)
+        while child_fd < 3:
+            child_fd = next(child_fds_iter)
+
+        extra_fds = []
+        # FIXME:  Is there a better (portable) way to list all open FDs?
+        for fd in range(3, MAXFD):
+            if fd > child_fd:
+                child_fd = next(child_fds_iter)
+
+            if fd == child_fd:
+                continue
+
+            try:
+                # Note:  Can't use lseek, can cause nul byte in pipes
+                #        where the position has not been set by read/write
+                #os.lseek(fd, os.SEEK_CUR, 0)
+                os.tcgetpgrp(fd)
+            except OSError:
+                # FIXME:  When support for Python 2.5 is dropped, use 'as'
+                oe = sys.exc_info()[1]
+                if oe.errno == errno.EBADF:
+                    continue
+
+            extra_fds.append(fd)
+
+        def preexec_fn():
+            # Note:  This function runs after standard FDs have been renumbered
+            #        from their original values to 0, 1, 2
+
+            for fd in extra_fds:
+                try:
+                    os.close(fd)
+                except OSError:
+                    pass
+
+            # Ensure that all descriptors passed to the child will remain open
+            # Arguably FD_CLOEXEC descriptors should be an argument error
+            # But for backwards compatibility, we just fix it here (after fork)
+            for fd in [0, 1, 2] + child_fds[:-1]:
+                try:
+                    fcntl.fcntl(fd, fcntl.F_SETFD, 0)
+                except OSError:
+                    # Will happen for renumbered FDs
+                    pass
+
+        return preexec_fn
+
+
+    def _launch_process(self, process, gnupg_commands, args):
+        """Run the child process"""
         fd_args = []
-
         for k, p in process._pipes.items():
             # set command-line options for non-standard fds
-            if k not in _stds:
-                fd_args.extend([ _fd_options[k], "%d" % p.child ])
+            if k in _stds:
+                continue
 
-            if not p.direct: os.close(p.parent)
+            if sys.platform == "win32":
+                # Must pass inheritable os file handle
+                curproc = _subprocess.GetCurrentProcess()
+                pchandle = msvcrt.get_osfhandle(p.child)
+                pcihandle = _subprocess.DuplicateHandle(
+                        curproc, pchandle, curproc, 0, 1,
+                        _subprocess.DUPLICATE_SAME_ACCESS)
+                fdarg = pcihandle.Detach()
+            else:
+                # Must pass file descriptor
+                fdarg = p.child
+            fd_args.extend([ _fd_options[k], str(fdarg) ])
 
         command = [ self.call ] + fd_args + self.options.get_args() \
                   + gnupg_commands + args
 
-        os.execvp( command[0], command )
-
-
-class Pipe:
+        if len(fd_args) > 0:
+            # Can't close all file descriptors
+            # Create preexec function to close what we can
+            preexec_fn = self._create_preexec_fn(process)
+
+        process._subproc = subprocess.Popen(command,
+                stdin=process._pipes['stdin'].child,
+                stdout=process._pipes['stdout'].child,
+                stderr=process._pipes['stderr'].child,
+                close_fds=not len(fd_args) > 0,
+                preexec_fn=preexec_fn,
+                shell=False)
+        process.pid = process._subproc.pid
+
+
+class Pipe(object):
     """simple struct holding stuff about pipes we use"""
+    __slots__ = ['parent', 'child', 'direct']
+
     def __init__(self, parent, child, direct):
         self.parent = parent
         self.child = child
         self.direct = direct
 
 
-class Options:
+class Options(object):
     """Objects of this class encompass options passed to GnuPG.
     This class is responsible for determining command-line arguments
     which are based on options.  It can be said that a GnuPG
@@ -493,6 +600,8 @@
 
       * homedir
       * default_key
+      * keyring
+      * secret_keyring
       * comment
       * compress_algo
       * options
@@ -536,38 +645,34 @@
     ['--armor', '--recipient', 'Alice', '--recipient', 'Bob', '--no-secmem-warning']
     """
 
+    booleans = ('armor', 'no_greeting',  'verbose',    'no_verbose',
+                'batch', 'always_trust', 'rfc1991',    'openpgp',
+                'quiet', 'no_options',   'textmode',   'force_v3_sigs')
+
+    metas = ('meta_pgp_5_compatible', 'meta_pgp_2_compatible',
+             'meta_interactive')
+
+    strings = ('homedir', 'default_key', 'comment', 'compress_algo',
+               'options', 'keyring', 'secret_keyring')
+
+    lists = ('encrypt_to', 'recipients')
+
+    __slots__ = booleans + metas + strings + lists + ('extra_args',)
+
     def __init__(self):
-        # booleans
-        self.armor = 0
-        self.no_greeting = 0
-        self.verbose = 0
-        self.no_verbose = 0
-        self.quiet = 0
-        self.batch = 0
-        self.always_trust = 0
-        self.rfc1991 = 0
-        self.openpgp = 0
-        self.force_v3_sigs = 0
-        self.no_options = 0
-        self.textmode = 0
+        for b in self.booleans:
+            setattr(self, b, 0)
 
-        # meta-option booleans
-        self.meta_pgp_5_compatible = 0
-        self.meta_pgp_2_compatible = 0
+        for m in self.metas:
+            setattr(self, m, 0)
         self.meta_interactive = 1
 
-        # strings
-        self.homedir = None
-        self.default_key = None
-        self.comment = None
-        self.compress_algo = None
-        self.options = None
-
-        # lists
-        self.encrypt_to = []
-        self.recipients = []
-
-        # miscellaneous arguments
+        for s in self.strings:
+            setattr(self, s, None)
+
+        for l in self.lists:
+            setattr(self, l, [])
+
         self.extra_args = []
 
     def get_args( self ):
@@ -583,6 +688,8 @@
         if self.comment != None: args.extend( [ '--comment', self.comment ] )
         if self.compress_algo != None: args.extend( [ '--compress-algo', self.compress_algo ] )
         if self.default_key != None: args.extend( [ '--default-key', self.default_key ] )
+        if self.keyring != None: args.extend( [ '--keyring', self.keyring ] )
+        if self.secret_keyring != None: args.extend( [ '--secret-keyring', self.secret_keyring ] )
 
         if self.no_options: args.append( '--no-options' )
         if self.armor: args.append( '--armor' )
@@ -615,7 +722,7 @@
         return args
 
 
-class Process:
+class Process(object):
     """Objects of this class encompass properties of a GnuPG
     process spawned by GnuPG.run().
 
@@ -637,43 +744,24 @@
     os.waitpid() to clean up the process, especially
     if multiple calls are made to run().
     """
+    __slots__ = ['_pipes', 'handles', 'pid', '_subproc']
 
     def __init__(self):
-        self._pipes   = {}
-        self.handles  = {}
-        self.pid      = None
-        self._waited  = None
-        self.thread   = None
-        self.returned = None
+        self._pipes  = {}
+        self.handles = {}
+        self.pid     = None
+        self._subproc = None
 
     def wait(self):
-        """
-        Wait on threaded_waitpid to exit and examine results.
-        Will raise an IOError if the process exits non-zero.
-        """
-        if self.returned == None:
-            self.thread.join()
-        if self.returned != 0:
-            raise IOError, "GnuPG exited non-zero, with code %d" % (self.returned >> 8)
-
-
-def threaded_waitpid(process):
-    """
-    When started as a thread with the Process object, thread
-    will execute an immediate waitpid() against the process
-    pid and will collect the process termination info.  This
-    will allow us to reap child processes as soon as possible,
-    thus freeing resources quickly.
-    """
-    try:
-        process.returned = os.waitpid(process.pid, 0)[1]
-    except:
-        log.Debug("GPG process %d terminated before wait()" % process.pid)
-        process.returned = 0
-
+        """Wait on the process to exit, allowing for child cleanup.
+        Will raise an IOError if the process exits non-zero."""
+
+        e = self._subproc.wait()
+        if e != 0:
+            raise IOError("GnuPG exited non-zero, with code %d" % e)
 
 def _run_doctests():
-    import doctest, GnuPGInterface #@UnresolvedImport
+    import doctest, GnuPGInterface
     return doctest.testmod(GnuPGInterface)
 
 # deprecated

=== modified file 'duplicity/backend.py'
--- duplicity/backend.py	2010-08-09 18:56:03 +0000
+++ duplicity/backend.py	2010-10-25 15:49:45 +0000
@@ -64,7 +64,8 @@
     @return: void
     """
     path = duplicity.backends.__path__[0]
-    assert path.endswith("duplicity/backends"), duplicity.backends.__path__
+    assert os.path.normcase(path).endswith("duplicity" + os.path.sep + "backends"), \
+            duplicity.backends.__path__
 
     files = os.listdir(path)
     for fn in files:
@@ -201,6 +202,12 @@
 
     Raise InvalidBackendURL on invalid URL's
     """
+
+    if sys.platform == "win32":
+        # Regex to match a path containing a Windows drive specifier
+        # Valid paths include "C:" "C:/" "C:stuff" "C:/stuff", not "C://stuff"
+        _drivespecre = re.compile("^[a-z]:(?![/\\\\]{2})", re.IGNORECASE)
+
     def __init__(self, url_string):
         self.url_string = url_string
         _ensure_urlparser_initialized()
@@ -266,6 +273,12 @@
         if not pu.scheme:
             return
 
+        # This happens with implicit local paths with a drive specifier
+        if sys.platform == "win32" and \
+                re.match(ParsedUrl._drivespecre, url_string):
+            self.scheme = ""
+            return
+
         # Our backends do not handle implicit hosts.
         if pu.scheme in urlparser.uses_netloc and not pu.hostname:
             raise InvalidBackendURL("Missing hostname in a backend URL which "

=== modified file 'duplicity/backends/localbackend.py'
--- duplicity/backends/localbackend.py	2010-07-22 19:15:11 +0000
+++ duplicity/backends/localbackend.py	2010-10-25 15:49:45 +0000
@@ -39,7 +39,16 @@
         # The URL form "file:MyFile" is not a valid duplicity target.
         if not parsed_url.path.startswith( '//' ):
             raise BackendException( "Bad file:// path syntax." )
-        self.remote_pathdir = path.Path(parsed_url.path[2:])
+
+        # According to RFC 1738, file URLs take the form
+        # file://<hostname>/<path> where <hostname> == "" is localhost
+        # However, for backwards compatibility, interpret file://stuff/... as
+        # being a relative path starting with directory stuff
+        if parsed_url.path[2:3] == '/':
+            pathstr = path.from_url_path(parsed_url.path[3:], is_abs=True)
+        else:
+            pathstr = path.from_url_path(parsed_url.path[2:], is_abs=False)
+        self.remote_pathdir = path.Path(pathstr)
 
     def put(self, source_path, remote_filename = None, rename = None):
         """If rename is set, try that first, copying if doesn't work"""

=== modified file 'duplicity/commandline.py'
--- duplicity/commandline.py	2010-10-06 15:57:51 +0000
+++ duplicity/commandline.py	2010-10-25 15:49:45 +0000
@@ -184,8 +184,24 @@
     def set_log_fd(fd):
         if fd < 1:
             raise optparse.OptionValueError("log-fd must be greater than zero.")
+        if sys.platform == "win32":
+            # Convert OS file handle to C file descriptor.  open_osfhandle
+            # raises on an invalid handle rather than returning an error code.
+            import msvcrt
+            try:
+                fd = msvcrt.open_osfhandle(fd, 1)   #  1 = _O_WRONLY
+            except (IOError, OSError):
+                raise optparse.OptionValueError("Unable to open log-fd.")
         log.add_fd(fd)
 
+    def set_restore_dir(dir):
+        # Remove empty tail component, if any
+        head, tail = os.path.split(dir)
+        if not tail:
+            dir = head
+
+        globals.restore_dir = dir
+
     def set_time_sep(sep, opt):
         if sep == '-':
             raise optparse.OptionValueError("Dash ('-') not valid for time-separator.")
@@ -291,7 +304,7 @@
     # --archive-dir <path>
     parser.add_option("--file-to-restore", "-r", action="callback", type="file",
                       metavar=_("path"), dest="restore_dir",
-                      callback=lambda o, s, v, p: setattr(p.values, "restore_dir", v.rstrip('/')))
+                      callback=set_restore_dir)
 
     # Used to confirm certain destructive operations like deleting old files.
     parser.add_option("--force", action="store_true")

=== modified file 'duplicity/compilec.py'
--- duplicity/compilec.py	2009-04-01 15:07:45 +0000
+++ duplicity/compilec.py	2010-10-25 15:49:45 +0000
@@ -20,7 +20,9 @@
 # along with duplicity; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
-import sys, os
+import os
+import shutil
+import sys
 from distutils.core import setup, Extension
 
 assert len(sys.argv) == 1
@@ -33,5 +35,16 @@
                              ["_librsyncmodule.c"],
                              libraries=["rsync"])])
 
-assert not os.system("mv `find build -name _librsync.so` .")
-assert not os.system("rm -rf build")
+def find_any_of(filenames, basedir="."):
+    for dirpath, dirnames, dirfilenames in os.walk(basedir):
+        for filename in filenames:
+            if filename in dirfilenames:
+                return os.path.join(dirpath, filename)
+
+extfile = find_any_of(("_librsync.pyd", "_librsync.so"), "build")
+if not extfile:
+    sys.stderr.write("Can't find _librsync extension binary, build failed?\n")
+    sys.exit(1)
+
+os.rename(extfile, os.path.basename(extfile))
+shutil.rmtree("build")

=== modified file 'duplicity/dup_temp.py'
--- duplicity/dup_temp.py	2010-07-22 19:15:11 +0000
+++ duplicity/dup_temp.py	2010-10-25 15:49:45 +0000
@@ -161,9 +161,11 @@
         We have achieved the first checkpoint, make file visible and permanent.
         """
         assert not globals.restart
-        self.tdp.rename(self.dirpath.append(self.partname))
+        # Can't rename files open for write on Windows.  Wait for close hook
+        if sys.platform != "win32":
+            self.tdp.rename(self.dirpath.append(self.partname))
+            del self.hooklist[0]
         self.fileobj.flush()
-        del self.hooklist[0]
 
     def to_remote(self):
         """
@@ -173,13 +175,15 @@
         pr = file_naming.parse(self.remname)
         src = self.dirpath.append(self.partname)
         tgt = self.dirpath.append(self.remname)
-        src_iter = SrcIter(src)
         if pr.compressed:
+            src_iter = SrcIter(src)
             gpg.GzipWriteFile(src_iter, tgt.name, size = sys.maxint)
         elif pr.encrypted:
+            src_iter = SrcIter(src)
             gpg.GPGWriteFile(src_iter, tgt.name, globals.gpg_profile, size = sys.maxint)
         else:
-            os.system("cp -p %s %s" % (src.name, tgt.name))
+            src.copy(tgt)
+            src.copy_attribs(tgt)
         globals.backend.put(tgt) #@UndefinedVariable
         os.unlink(tgt.name)
 
@@ -189,9 +193,9 @@
         """
         src = self.dirpath.append(self.partname)
         tgt = self.dirpath.append(self.permname)
-        src_iter = SrcIter(src)
         pr = file_naming.parse(self.permname)
         if pr.compressed:
+            src_iter = SrcIter(src)
             gpg.GzipWriteFile(src_iter, tgt.name, size = sys.maxint)
             os.unlink(src.name)
         else:

=== modified file 'duplicity/globals.py'
--- duplicity/globals.py	2010-08-26 13:01:10 +0000
+++ duplicity/globals.py	2010-10-25 15:49:45 +0000
@@ -21,7 +21,7 @@
 
 """Store global configuration information"""
 
-import socket, os
+import sys, socket, os
 
 # The current version of duplicity
 version = "$version"
@@ -36,16 +36,59 @@
 # The symbolic name of the backup being operated upon.
 backup_name = None
 
+# On Windows, use SHGetFolderPath for determining program directories
+if sys.platform == "win32":
+    import ctypes
+    import ctypes.wintypes as wintypes
+    windll = ctypes.windll
+
+    CSIDL_APPDATA = 0x001a
+    CSIDL_LOCAL_APPDATA = 0x001c
+    def get_csidl_folder_path(csidl):
+        SHGetFolderPath = windll.shell32.SHGetFolderPathW
+        SHGetFolderPath.argtypes = [
+                wintypes.HWND,
+                ctypes.c_int,
+                wintypes.HANDLE,
+                wintypes.DWORD,
+                wintypes.LPWSTR,
+                ]
+        folderpath = ctypes.create_unicode_buffer(wintypes.MAX_PATH)
+        result = SHGetFolderPath(0, csidl, 0, 0, folderpath)
+        if result != 0:
+            raise WindowsError(result, "Unable to get folder path")
+        return folderpath.value
+
+
 # Set to the Path of the archive directory (the directory which
 # contains the signatures and manifests of the relevent backup
 # collection), and for checkpoint state between volumes.
 # NOTE: this gets expanded in duplicity.commandline
-os.environ["XDG_CACHE_HOME"] = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
-archive_dir = os.path.expandvars("$XDG_CACHE_HOME/duplicity")
+if sys.platform == "win32":
+    try:
+        archive_dir = get_csidl_folder_path(CSIDL_LOCAL_APPDATA)
+    except WindowsError:
+        try:
+            archive_dir = get_csidl_folder_path(CSIDL_APPDATA)
+        except WindowsError:
+            archive_dir = os.getenv("LOCALAPPDATA") or \
+                    os.getenv("APPDATA") or \
+                    os.path.expanduser("~")
+    archive_dir = os.path.join(archive_dir, "Duplicity", "Archives")
+else:
+    os.environ["XDG_CACHE_HOME"] = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
+    archive_dir = os.path.expandvars("$XDG_CACHE_HOME/duplicity")
 
 # config dir for future use
-os.environ["XDG_CONFIG_HOME"] = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
-config_dir = os.path.expandvars("$XDG_CONFIG_HOME/duplicity")
+if sys.platform == "win32":
+    try:
+        config_dir = get_csidl_folder_path(CSIDL_APPDATA)
+    except WindowsError:
+        config_dir = os.getenv("APPDATA") or os.path.expanduser("~")
+    config_dir = os.path.join(config_dir, "Duplicity", "Config")
+else:
+    os.environ["XDG_CONFIG_HOME"] = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
+    config_dir = os.path.expandvars("$XDG_CONFIG_HOME/duplicity")
 
 # Restores will try to bring back the state as of the following time.
 # If it is None, default to current time.

=== modified file 'duplicity/manifest.py'
--- duplicity/manifest.py	2010-07-22 19:15:11 +0000
+++ duplicity/manifest.py	2010-10-25 15:49:45 +0000
@@ -25,6 +25,7 @@
 
 from duplicity import log
 from duplicity import globals
+from duplicity import path
 from duplicity import util
 
 class ManifestError(Exception):
@@ -67,7 +68,8 @@
             if self.hostname:
                 self.fh.write("Hostname %s\n" % self.hostname)
             if self.local_dirname:
-                self.fh.write("Localdir %s\n" % Quote(self.local_dirname))
+                self.fh.write("Localdir %s\n" % \
+                        Quote(path.to_posix(self.local_dirname)))
         return self
 
     def check_dirinfo(self):
@@ -146,7 +148,8 @@
         if self.hostname:
             result += "Hostname %s\n" % self.hostname
         if self.local_dirname:
-            result += "Localdir %s\n" % Quote(self.local_dirname)
+            result += "Localdir %s\n" % \
+                Quote(path.to_posix(self.local_dirname))
 
         vol_num_list = self.volume_info_dict.keys()
         vol_num_list.sort()
@@ -173,6 +176,9 @@
                 return Unquote(m.group(2))
         self.hostname = get_field("hostname")
         self.local_dirname = get_field("localdir")
+        if self.local_dirname:
+            self.local_dirname = path.from_posix(self.local_dirname,
+                    globals.local_path and globals.local_path.name)
 
         next_vi_string_regexp = re.compile("(^|\\n)(volume\\s.*?)"
                                            "(\\nvolume\\s|$)", re.I | re.S)
@@ -221,7 +227,7 @@
         Write string version of manifest to given path
         """
         assert not path.exists()
-        fout = path.open("w")
+        fout = path.open("wb")
         fout.write(self.to_string())
         assert not fout.close()
         path.setdata()

=== modified file 'duplicity/patchdir.py'
--- duplicity/patchdir.py	2010-07-22 19:15:11 +0000
+++ duplicity/patchdir.py	2010-10-25 15:49:45 +0000
@@ -19,7 +19,9 @@
 # along with duplicity; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
+import os
 import re #@UnusedImport
+import sys
 import types
 import tempfile
 
@@ -470,6 +472,19 @@
         if not isinstance( current_file, file ):
             # librsync needs true file
             tempfp = tempfile.TemporaryFile( dir=globals.temproot )
+            if os.name == "nt":
+                # Temp wrapper is unnecessary, file opened O_TEMPORARY
+                tempfp = tempfp.file
+            elif os.name != "posix" and sys.platform != "cygwin":
+                # Note to future developers:
+                # librsync needs direct access to the underlying file.
+                # On these systems temporary files are wrapper objects
+                # The wrapper must be retained until file access is finished
+                # so that when it is released the file can be deleted
+                raise NotImplementedError(
+                        "No support for direct access of temporary files " +
+                        "on this platform")
+
             misc.copyfileobj( current_file, tempfp )
             assert not current_file.close()
             tempfp.seek( 0 )

=== modified file 'duplicity/path.py'
--- duplicity/path.py	2010-07-22 19:15:11 +0000
+++ duplicity/path.py	2010-10-25 15:49:45 +0000
@@ -41,6 +41,99 @@
 _copy_blocksize = 64 * 1024
 _tmp_path_counter = 1
 
+def split_all(path):
+    """
+    Split path into components
+
+    Invariant:  os.path.join(*split_all(path)) == path for a normalized path
+
+    @rtype list
+    @return List of components of path, beginning with "/" or a drive specifier
+            if absolute, ending with "" if path ended with a separator
+    """
+    parts = []
+    path, part = os.path.split(path)
+
+    # Special case for paths which end with a separator so rest is still split
+    if part == "":
+        parts.append(part)
+        path, part = os.path.split(path)
+
+    while path != "" and part != "":
+        parts.append(part)
+        path, part = os.path.split(path)
+
+    # Append root (path) for absolute path, or first relative component (part)
+    if path != "":
+        parts.append(path)
+    else:
+        parts.append(part)
+
+    parts.reverse()
+    return parts
+
+
+def from_posix(path, refpath=None):
+    """
+    Convert a POSIX-style path to the native path representation
+
+    Copy drive specification (if any) from refpath (if given)
+    """
+
+    # If native path representation is POSIX, no work needs to be done
+    if os.path.__name__ == "posixpath":
+        return path
+
+    parts = path.split("/")
+    if parts[0] == "":
+        parts[0] = os.path.sep
+
+    if refpath is not None:
+        drive = os.path.splitdrive(refpath)[0]
+        if drive:
+            parts.insert(0, drive)
+
+    return os.path.join(*parts)
+
+
+def to_posix(path):
+    """
+    Convert a path from the native path representation to a POSIX-style path
+
+    The path is broken into components according to split_all, then recombined
+    with "/" separating components.  Any drive specifier is omitted.
+    """
+
+    # If native path representation is POSIX, no work needs to be done
+    if os.path.__name__ == "posixpath":
+        return path
+
+    parts = split_all(path)
+    if os.path.isabs(path):
+        return "/" + "/".join(parts[1:])
+    else:
+        return "/".join(parts)
+
+
+def from_url_path(url_path, is_abs=True):
+    """
+    Convert the <path> component of a file URL into a path in the native path
+    representation.
+    """
+
+    parts = url_path.split("/")
+    if is_abs:
+        if os.path.__name__ == "posixpath":
+            parts.insert(0, "/")
+        elif os.path.__name__ == "ntpath":
+            parts[0] += os.path.sep
+        else:
+            raise NotImplementedError(
+                    "Method to create an absolute path not known")
+
+    return os.path.join(*parts)
+
+
 class StatResult:
     """Used to emulate the output of os.stat() and related"""
     # st_mode is required by the TarInfo class, but it's unclear how
@@ -142,7 +235,7 @@
     def get_relative_path(self):
         """Return relative path, created from index"""
         if self.index:
-            return "/".join(self.index)
+            return os.path.join(*self.index)
         else:
             return "."
 
@@ -435,7 +528,8 @@
     def copy_attribs(self, other):
         """Only copy attributes from self to other"""
         if isinstance(other, Path):
-            util.maybe_ignore_errors(lambda: os.chown(other.name, self.stat.st_uid, self.stat.st_gid))
+            if hasattr(os, "chown"):
+                util.maybe_ignore_errors(lambda: os.chown(other.name, self.stat.st_uid, self.stat.st_gid))
             util.maybe_ignore_errors(lambda: os.chmod(other.name, self.mode))
             util.maybe_ignore_errors(lambda: os.utime(other.name, (time.time(), self.stat.st_mtime)))
             other.setdata()
@@ -490,7 +584,7 @@
         try:
             self.stat = os.lstat(self.name)
         except OSError, e:
-            err_string = errno.errorcode[e[0]]
+            err_string = errno.errorcode.get(e[0])
             if err_string == "ENOENT" or err_string == "ENOTDIR" or err_string == "ELOOP":
                 self.stat, self.type = None, None # file doesn't exist
                 self.mode = None
@@ -578,11 +672,7 @@
         if self.index:
             return Path(self.base, self.index[:-1])
         else:
-            components = self.base.split("/")
-            if len(components) == 2 and not components[0]:
-                return Path("/") # already in root directory
-            else:
-                return Path("/".join(components[:-1]))
+            return Path(os.path.dirname(self.base))
 
     def writefileobj(self, fin):
         """Copy file object fin to self.  Close both when done."""
@@ -672,9 +762,7 @@
 
     def get_filename(self):
         """Return filename of last component"""
-        components = self.name.split("/")
-        assert components and components[-1]
-        return components[-1]
+        return os.path.basename(self.name)
 
     def get_canonical(self):
         """
@@ -684,12 +772,9 @@
         it's harder to remove "..", as "foo/bar/.." is not necessarily
         "foo", so we can't use path.normpath()
         """
-        newpath = "/".join(filter(lambda x: x and x != ".",
-                                  self.name.split("/")))
-        if self.name[0] == "/":
-            return "/" + newpath
-        elif newpath:
-            return newpath
+        pathparts = filter(lambda x: x and x != ".", split_all(self.name))
+        if pathparts:
+            return os.path.join(*pathparts)
         else:
             return "."
 

=== modified file 'duplicity/selection.py'
--- duplicity/selection.py	2010-07-22 19:15:11 +0000
+++ duplicity/selection.py	2010-10-25 15:49:45 +0000
@@ -23,12 +23,15 @@
 import re #@UnusedImport
 import stat #@UnusedImport
 
-from duplicity.path import * #@UnusedWildImport
+from duplicity import path
 from duplicity import log #@Reimport
 from duplicity import globals #@Reimport
 from duplicity import diffdir
 from duplicity import util #@Reimport
 
+# For convenience
+Path = path.Path
+
 """Iterate exactly the requested files in a directory
 
 Parses includes and excludes to yield correct files.  More
@@ -93,6 +96,11 @@
         self.rootpath = path
         self.prefix = self.rootpath.name
 
+        # Make sure prefix names a directory so prefix matching doesn't
+        # match partial directory names
+        if os.path.basename(self.prefix) != "":
+            self.prefix = os.path.join(self.prefix, "")
+
     def set_iter(self):
         """Initialize generator, prepare to iterate."""
         self.rootpath.setdata() # this may have changed since Select init
@@ -381,7 +389,7 @@
         if not line.startswith(self.prefix):
             raise FilePrefixError(line)
         line = line[len(self.prefix):] # Discard prefix
-        index = tuple(filter(lambda x: x, line.split("/"))) # remove empties
+        index = tuple(filter(lambda x: x, path.split_all(line))) # remove empties
         return (index, include)
 
     def filelist_pair_match(self, path, pair):
@@ -532,8 +540,8 @@
         """
         if not filename.startswith(self.prefix):
            raise FilePrefixError(filename)
-        index = tuple(filter(lambda x: x,
-                             filename[len(self.prefix):].split("/")))
+        index = tuple(filter(lambda x: x,
+                             path.split_all(filename[len(self.prefix):])))
         return self.glob_get_tuple_sf(index, include)
 
     def glob_get_tuple_sf(self, tuple, include):
@@ -614,17 +621,14 @@
 
     def glob_get_prefix_res(self, glob_str):
         """Return list of regexps equivalent to prefixes of glob_str"""
-        glob_parts = glob_str.split("/")
+        glob_parts = path.split_all(glob_str)
         if "" in glob_parts[1:-1]:
             # "" OK if comes first or last, as in /foo/
             raise GlobbingError("Consecutive '/'s found in globbing string "
                                 + glob_str)
 
-        prefixes = map(lambda i: "/".join(glob_parts[:i+1]),
+        prefixes = map(lambda i: os.path.join(*glob_parts[:i+1]),
                        range(len(glob_parts)))
-        # we must make exception for root "/", only dir to end in slash
-        if prefixes[0] == "":
-            prefixes[0] = "/"
         return map(self.glob_to_re, prefixes)
 
     def glob_to_re(self, pat):
@@ -638,6 +642,12 @@
         by Donovan Baarda.
 
         """
+        # Build regex for non-directory separator characters
+        notsep = os.path.sep
+        if os.path.altsep:
+            notsep += os.path.altsep
+        notsep = "[^" + notsep.replace("\\", "\\\\") + "]"
+
         i, n, res = 0, len(pat), ''
         while i < n:
             c, s = pat[i], pat[i:i+2]
@@ -646,9 +656,9 @@
                 res = res + '.*'
                 i = i + 1
             elif c == '*':
-                res = res + '[^/]*'
+                res = res + notsep + '*'
             elif c == '?':
-                res = res + '[^/]'
+                res = res + notsep
             elif c == '[':
                 j = i
                 if j < n and pat[j] in '!^':

=== modified file 'duplicity/tarfile.py'
--- duplicity/tarfile.py	2010-07-22 19:15:11 +0000
+++ duplicity/tarfile.py	2010-10-25 15:49:45 +0000
@@ -1683,8 +1683,10 @@
 def set_pwd_dict():
     """Set global pwd caching dictionaries uid_dict and uname_dict"""
     global uid_dict, uname_dict
-    assert uid_dict is None and uname_dict is None and pwd
+    assert uid_dict is None and uname_dict is None
     uid_dict = {}; uname_dict = {}
+    if pwd is None:
+        return
     for entry in pwd.getpwall():
         uname = entry[0]; uid = entry[2]
         uid_dict[uid] = uname
@@ -1702,8 +1704,10 @@
 
 def set_grp_dict():
     global gid_dict, gname_dict
-    assert gid_dict is None and gname_dict is None and grp
+    assert gid_dict is None and gname_dict is None
     gid_dict = {}; gname_dict = {}
+    if grp is None:
+        return
     for entry in grp.getgrall():
         gname = entry[0]; gid = entry[2]
         gid_dict[gid] = gname

=== removed file 'po/update-pot'
--- po/update-pot	2009-09-15 02:13:01 +0000
+++ po/update-pot	1970-01-01 00:00:00 +0000
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-intltool-update --pot -g duplicity
-sed -e 's/^#\. TRANSL:/#./' -i duplicity.pot

=== added file 'po/update-pot.py'
--- po/update-pot.py	1970-01-01 00:00:00 +0000
+++ po/update-pot.py	2010-10-25 15:49:45 +0000
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+import os
+import re
+import sys
+import tempfile
+
+retval = os.system('intltool-update --pot -g duplicity')
+if retval != 0:
+    # intltool-update failed and already wrote errors; its wait status may be
+    # e.g. 256, which sys.exit would truncate to 0, so exit 1 explicitly
+    sys.exit(1)
+
+replre = re.compile('^#\. TRANSL:')
+with open("duplicity.pot", "rb") as potfile:
+    # Create the temp file in the current directory so the final rename
+    # cannot fail by crossing filesystems
+    with tempfile.NamedTemporaryFile(delete=False, dir=".") as tmpfile:
+        tmpfilename = tmpfile.name
+        for line in potfile:
+            tmpfile.write(replre.sub("#.", line))


Follow ups