[Merge] lp:~aaron-whitehouse/duplicity/PEP8_line_length into lp:duplicity

Aaron Whitehouse has proposed merging lp:~aaron-whitehouse/duplicity/PEP8_line_length into lp:duplicity.

Requested reviews:
  duplicity-team (duplicity-team)

For more details, see:
https://code.launchpad.net/~aaron-whitehouse/duplicity/PEP8_line_length/+merge/298717

Set the PEP8 maximum line length to 120 characters (matching tox.ini) and fixed the resulting E501 (line too long) errors.
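
For reference, the 120-column limit can also be checked programmatically with
the pep8 module. A minimal sketch follows; the file list is illustrative only
and this snippet is not part of the patch:

    # Sketch: run a PEP8 check at 120 columns, mirroring the tox.ini setting.
    # The paths below are examples, not the patch's actual file list.
    import pep8

    style = pep8.StyleGuide(max_line_length=120)
    report = style.check_files(['bin/duplicity', 'duplicity/backend.py'])
    print('PEP8 violations found (including E501): %d' % report.total_errors)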
-- 
Your team duplicity-team is requested to review the proposed merge of lp:~aaron-whitehouse/duplicity/PEP8_line_length into lp:duplicity.
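
The diff below applies three standard E501 fixes. The following sketch, using
stand-in names rather than duplicity's real objects, shows each idiom,
including the separating space that adjacent string literals need so that
words do not run together:

    # Sketch of the three line-wrapping idioms used throughout the diff,
    # shown with stand-in values rather than duplicity's real objects.

    # 1. Implicit continuation: anything inside (), [] or {} may be split
    #    across lines without a backslash.
    def log_warn(message, force_print=False):
        # stand-in for duplicity's log.Log(..., log.WARNING, force_print=True)
        print(message)

    log_warn("First and second passphrases do not match!  Please try again.",
             force_print=True)

    # 2. Backslash continuation, needed where no brackets enclose the
    #    statement, e.g. an assert with a message.
    pass1, pass2 = "secret", "secret"
    assert pass1 == pass2, \
        "First and second passphrases do not match!"

    # 3. Adjacent string literals concatenate at compile time; keep a
    #    leading space on the continuation line or the words run together
    #    ("timeout,aborting" instead of "timeout, aborting").
    msg = ("%s tasks did not finish by the specified timeout,"
           " aborting multipart upload and resetting pool." % 3)
    print(msg)
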
=== modified file 'bin/duplicity'
--- bin/duplicity	2016-06-12 13:13:57 +0000
+++ bin/duplicity	2016-06-29 22:46:17 +0000
@@ -210,12 +210,15 @@
                 pass2 = getpass_safe(_("Retype passphrase to confirm: "))
 
             if not pass1 == pass2:
-                log.Log(_("First and second passphrases do not match!  Please try again."), log.WARNING, force_print=True)
+                log.Log(_("First and second passphrases do not match!  Please try again."),
+                        log.WARNING, force_print=True)
                 use_cache = False
                 continue
 
-            if not pass1 and not (globals.gpg_profile.recipients or globals.gpg_profile.hidden_recipients) and not for_signing:
-                log.Log(_("Cannot use empty passphrase with symmetric encryption!  Please try again."), log.WARNING, force_print=True)
+            if not pass1 and not (globals.gpg_profile.recipients or
+                                  globals.gpg_profile.hidden_recipients) and not for_signing:
+                log.Log(_("Cannot use empty passphrase with symmetric encryption!  Please try again."),
+                        log.WARNING, force_print=True)
                 use_cache = False
                 continue
 
@@ -427,7 +430,8 @@
 
         # write volume
         if globals.encryption:
-            at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name, globals.gpg_profile, globals.volsize)
+            at_end = gpg.GPGWriteFile(tarblock_iter, tdp.name, globals.gpg_profile,
+                                      globals.volsize)
         elif globals.compression:
             at_end = gpg.GzipWriteFile(tarblock_iter, tdp.name, globals.volsize)
         else:
@@ -449,13 +453,14 @@
             sig_outfp.flush()
             man_outfp.flush()
 
-        async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename, vol_num: put(tdp, dest_filename, vol_num),
+        async_waiters.append(io_scheduler.schedule_task(lambda tdp, dest_filename,
+                                                        vol_num: put(tdp, dest_filename, vol_num),
                                                         (tdp, dest_filename, vol_num)))
 
         # Log human-readable version as well as raw numbers for machine consumers
         log.Progress(_('Processed volume %d') % vol_num, diffdir.stats.SourceFileSize)
-        # Snapshot (serialize) progress now as a Volume has been completed. This is always the last restore point
-        # when it comes to restart a failed backup
+        # Snapshot (serialize) progress now as a Volume has been completed.
+        # This is always the last restore point when restarting a failed backup
         if globals.progress:
             progress.tracker.snapshot_progress(vol_num)
 
@@ -637,7 +642,8 @@
         if dup_time.curtime == dup_time.prevtime:
             time.sleep(2)
             dup_time.setcurtime()
-            assert dup_time.curtime != dup_time.prevtime, "time not moving forward at appropriate pace - system clock issues?"
+            assert dup_time.curtime != dup_time.prevtime, \
+                "time not moving forward at appropriate pace - system clock issues?"
 
     if globals.progress:
         progress.tracker = progress.ProgressTracker()
@@ -797,7 +803,8 @@
     verified, hash_pair, calculated_hash = restore_check_hash(volume_info, tdp)
     if not verified:
         log.FatalError("%s\n %s\n %s\n %s\n" %
-                       (_("Invalid data - %s hash mismatch for file:") % hash_pair[0],
+                       (_("Invalid data - %s hash mismatch for file:") %
+                        hash_pair[0],
                         util.ufn(filename),
                         _("Calculated hash: %s") % calculated_hash,
                         _("Manifest hash: %s") % hash_pair[1]),
@@ -980,7 +987,8 @@
         chainlist += col_stats.get_signature_chains_older_than(globals.remove_time)
         chainlist.reverse()  # save oldest for last
         for chain in chainlist:
-            # if remove_all_inc_of_but_n_full_mode mode, remove only incrementals one and not full
+            # if in remove_all_inc_of_but_n_full_mode, remove only
+            # incrementals and not fulls
             if globals.remove_all_inc_of_but_n_full_mode:
                 if isinstance(chain, collections.SignatureChain):
                     chain_desc = _("Deleting any incremental signature chain rooted at %s")
@@ -1077,11 +1085,13 @@
     def remove_local(fn):
         del_name = globals.archive_dir.append(fn).name
 
-        log.Notice(_("Deleting local %s (not authoritative at backend).") % util.ufn(del_name))
+        log.Notice(_("Deleting local %s (not authoritative at backend).") %
+                   util.ufn(del_name))
         try:
             util.ignore_missing(os.unlink, del_name)
         except Exception as e:
-            log.Warn(_("Unable to delete %s: %s") % (util.ufn(del_name), util.uexc(e)))
+            log.Warn(_("Unable to delete %s: %s") % (util.ufn(del_name),
+                                                     util.uexc(e)))
 
     def copy_to_local(fn):
         """
@@ -1495,7 +1505,10 @@
             # symmetric key
             if (globals.gpg_profile.signing_passphrase and
                     globals.gpg_profile.passphrase != globals.gpg_profile.signing_passphrase):
-                log.FatalError(_("When using symmetric encryption, the signing passphrase must equal the encryption passphrase."), log.ErrorCode.user_error)
+                log.FatalError(_(
+                    "When using symmetric encryption, the signing passphrase "
+                    "must equal the encryption passphrase."),
+                    log.ErrorCode.user_error)
 
         if action == "full":
             full_backup(col_stats)

=== modified file 'duplicity/backend.py'
--- duplicity/backend.py	2016-03-01 16:19:12 +0000
+++ duplicity/backend.py	2016-06-29 22:46:17 +0000
@@ -301,7 +301,9 @@
         except Exception:  # not raised in python2.7+, just returns None
             # old style rsync://host::[/]dest, are still valid, though they contain no port
             if not (self.scheme in ['rsync'] and re.search('::[^:]*$', self.url_string)):
-                raise InvalidBackendURL("Syntax error (port) in: %s A%s B%s C%s" % (url_string, (self.scheme in ['rsync']), re.search('::[^:]+$', self.netloc), self.netloc))
+                raise InvalidBackendURL("Syntax error (port) in: %s A%s B%s C%s" %
+                                        (url_string, (self.scheme in ['rsync']),
+                                         re.search('::[^:]+$', self.netloc), self.netloc))
 
         # Our URL system uses two slashes more than urlparse's does when using
         # non-netloc URLs.  And we want to make sure that if urlparse assuming

=== modified file 'duplicity/backends/_boto_multi.py'
--- duplicity/backends/_boto_multi.py	2014-12-12 14:39:54 +0000
+++ duplicity/backends/_boto_multi.py	2016-06-29 22:46:17 +0000
@@ -157,7 +157,8 @@
                 else:
                     raise multiprocessing.TimeoutError
             except multiprocessing.TimeoutError:
-                log.Debug("%s tasks did not finish by the specified timeout, aborting multipart upload and resetting pool." % len(tasks))
+                log.Debug("%s tasks did not finish by the specified timeout,"
+                          " aborting multipart upload and resetting pool." % len(tasks))
                 self._setup_pool()
                 break
 
@@ -204,7 +205,10 @@
                                                  num_cb=max(2, 8 * bytes / (1024 * 1024))
                                                  )  # Max num of callbacks = 8 times x megabyte
                         end = time.time()
-                        log.Debug("{name}: Uploaded chunk {chunk} at roughly {speed} bytes/second".format(name=worker_name, chunk=offset + 1, speed=(bytes / max(1, abs(end - start)))))
+                        log.Debug(("{name}: Uploaded chunk {chunk}"
+                                  " at roughly {speed} bytes/second").format(name=worker_name,
+                                                                            chunk=offset + 1,
+                                                                            speed=(bytes / max(1, abs(end - start)))))
                     break
             conn.close()
             conn = None

=== modified file 'duplicity/backends/_boto_single.py'
--- duplicity/backends/_boto_single.py	2016-04-17 16:47:20 +0000
+++ duplicity/backends/_boto_single.py	2016-06-29 22:46:17 +0000
@@ -238,7 +238,9 @@
         upload_end = time.time()
         total_s = abs(upload_end - upload_start) or 1  # prevent a zero value!
         rough_upload_speed = os.path.getsize(source_path.name) / total_s
-        log.Debug("Uploaded %s/%s to %s Storage at roughly %f bytes/second" % (self.straight_url, remote_filename, storage_class, rough_upload_speed))
+        log.Debug("Uploaded %s/%s to %s Storage at roughly %f bytes/second" %
+                  (self.straight_url, remote_filename, storage_class,
+                   rough_upload_speed))
 
     def _get(self, remote_filename, local_path):
         key_name = self.key_prefix + remote_filename

=== modified file 'duplicity/backends/acdclibackend.py'
--- duplicity/backends/acdclibackend.py	2016-02-18 16:28:31 +0000
+++ duplicity/backends/acdclibackend.py	2016-06-29 22:46:17 +0000
@@ -133,7 +133,8 @@
 
     def _delete(self, remote_filename):
         """Delete remote_filename"""
-        remote_file_path = os.path.join(urllib.unquote(self.parsed_url.path.replace('///', '/')), remote_filename).rstrip()
+        remote_file_path = os.path.join(urllib.unquote(self.parsed_url.path.replace('///', '/')),
+                                        remote_filename).rstrip()
         commandline = self.acd_cmd + " rm '%s'" % (remote_file_path)
         self.subprocess_popen(commandline)
 

=== modified file 'duplicity/backends/dpbxbackend.py'
--- duplicity/backends/dpbxbackend.py	2016-05-30 14:14:05 +0000
+++ duplicity/backends/dpbxbackend.py	2016-06-29 22:46:17 +0000
@@ -103,7 +103,8 @@
         return os.environ.get('DPBX_ACCESS_TOKEN', None)
 
     def save_access_token(self, access_token):
-        raise BackendException('dpbx: Please set DPBX_ACCESS_TOKEN=\"%s\" environment variable' % access_token)
+        raise BackendException('dpbx: Please set DPBX_ACCESS_TOKEN=\"%s\" environment variable' %
+                               access_token)
 
     def obtain_access_token(self):
         log.Info("dpbx: trying to obtain access token")
@@ -115,7 +116,8 @@
         app_secret = os.environ['DPBX_APP_SECRET']
 
         if not sys.stdout.isatty() or not sys.stdin.isatty():
-            log.FatalError('dpbx error: cannot interact, but need human attention', log.ErrorCode.backend_command_error)
+            log.FatalError('dpbx error: cannot interact, but need human attention',
+                           log.ErrorCode.backend_command_error)
 
         auth_flow = DropboxOAuth2FlowNoRedirect(app_key, app_secret)
         log.Debug('dpbx,auth_flow.start()')
@@ -152,10 +154,12 @@
 
             self.obtain_access_token()
 
-            # We're assuming obtain_access_token will throw exception. So this line should not be reached
+            # We're assuming obtain_access_token will throw an exception,
+            # so this line should not be reached
             raise BackendException("dpbx: Please update DPBX_ACCESS_TOKEN and try again")
 
-        log.Info("dpbx: Successfully authenticated as %s" % self.api_account.name.display_name)
+        log.Info("dpbx: Successfully authenticated as %s" %
+                 self.api_account.name.display_name)
 
     def _error_code(self, operation, e):
         if isinstance(e, ApiError):
@@ -185,16 +189,22 @@
 
         # A few sanity checks
         if res_metadata.path_display != remote_path:
-            raise BackendException('dpbx: result path mismatch: %s (expected: %s)' % (res_metadata.path_display, remote_path))
+            raise BackendException('dpbx: result path mismatch: %s (expected: %s)' %
+                                   (res_metadata.path_display, remote_path))
         if res_metadata.size != file_size:
-            raise BackendException('dpbx: result size mismatch: %s (expected: %s)' % (res_metadata.size, file_size))
+            raise BackendException('dpbx: result size mismatch: %s (expected: %s)' %
+                                   (res_metadata.size, file_size))
 
     def put_file_small(self, source_path, remote_path):
         file_size = os.path.getsize(source_path.name)
         f = source_path.open('rb')
         try:
             log.Debug('dpbx,files_upload(%s, [%d bytes])' % (remote_path, file_size))
-            res_metadata = self.api_client.files_upload(f, remote_path, mode=WriteMode.overwrite, autorename=False, client_modified=None, mute=True)
+            res_metadata = self.api_client.files_upload(f, remote_path,
+                                                        mode=WriteMode.overwrite,
+                                                        autorename=False,
+                                                        client_modified=None,
+                                                        mute=True)
             log.Debug('dpbx,files_upload(): %s' % res_metadata)
             progress.report_transfer(file_size, file_size)
             return res_metadata
@@ -206,11 +216,14 @@
         f = source_path.open('rb')
         try:
             buf = f.read(DPBX_UPLOAD_CHUNK_SIZE)
-            log.Debug('dpbx,files_upload_session_start([%d bytes]), total: %d' % (len(buf), file_size))
+            log.Debug('dpbx,files_upload_session_start([%d bytes]), total: %d' %
+                      (len(buf), file_size))
             upload_sid = self.api_client.files_upload_session_start(buf)
             log.Debug('dpbx,files_upload_session_start(): %s' % upload_sid)
             upload_cursor = UploadSessionCursor(upload_sid.session_id, f.tell())
-            commit_info = CommitInfo(remote_path, mode=WriteMode.overwrite, autorename=False, client_modified=None, mute=True)
+            commit_info = CommitInfo(remote_path, mode=WriteMode.overwrite,
+                                     autorename=False, client_modified=None,
+                                     mute=True)
             res_metadata = None
             progress.report_transfer(f.tell(), file_size)
 
@@ -220,7 +233,8 @@
             is_eof = False
 
             # We're doing our own error handling and retrying logic because
-            # we can benefit from Dpbx chunked upload and retry only failed chunk
+            # we can benefit from Dpbx chunked upload and retry only failed
+            # chunk
             while not is_eof or not res_metadata:
                 try:
                     if requested_offset is not None:
@@ -241,25 +255,36 @@
 
                     if not is_eof:
                         assert len(buf) != 0
-                        log.Debug('dpbx,files_upload_sesssion_append([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset))
-                        self.api_client.files_upload_session_append(buf, upload_cursor.session_id, upload_cursor.offset)
+                        log.Debug('dpbx,files_upload_sesssion_append([%d bytes], offset=%d)' %
+                                  (len(buf), upload_cursor.offset))
+                        self.api_client.files_upload_session_append(buf,
+                                                                    upload_cursor.session_id,
+                                                                    upload_cursor.offset)
                     else:
-                        log.Debug('dpbx,files_upload_sesssion_finish([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset))
-                        res_metadata = self.api_client.files_upload_session_finish(buf, upload_cursor, commit_info)
+                        log.Debug('dpbx,files_upload_sesssion_finish([%d bytes], offset=%d)' %
+                                  (len(buf), upload_cursor.offset))
+                        res_metadata = self.api_client.files_upload_session_finish(buf,
+                                                                                   upload_cursor,
+                                                                                   commit_info)
 
                     upload_cursor.offset = f.tell()
-                    log.Debug('progress: %d of %d' % (upload_cursor.offset, file_size))
+                    log.Debug('progress: %d of %d' % (upload_cursor.offset,
+                                                      file_size))
                     progress.report_transfer(upload_cursor.offset, file_size)
                 except ApiError as e:
                     error = e.error
                     if isinstance(error, UploadSessionLookupError) and error.is_incorrect_offset():
-                        # Server reports that we should send another chunk. Most likely this is caused by
-                        # network error during previous upload attempt. In such case we'll get expected offset
-                        # from server and it's enough to just seek() and retry again
+                        # Server reports that we should send another chunk.
+                        # Most likely this is caused by a network error during
+                        # a previous upload attempt. In that case we'll get the
+                        # expected offset from the server and it's enough to just
+                        # seek() and retry again
                         new_offset = error.get_incorrect_offset().correct_offset
-                        log.Debug('dpbx,files_upload_session_append: incorrect offset: %d (expected: %s)' % (upload_cursor.offset, new_offset))
+                        log.Debug('dpbx,files_upload_session_append: incorrect offset: %d (expected: %s)' %
+                                  (upload_cursor.offset, new_offset))
                         if requested_offset is not None:
-                            # chunk failed even after seek attempt. Something strange and no safe way to recover
+                            # chunk failed even after seek attempt. Something
+                            # strange and no safe way to recover
                             raise BackendException("dpbx: unable to chunk upload")
                         else:
                             # will seek and retry
@@ -273,7 +298,9 @@
                     if retry_number == 0:
                         raise
 
-                    # We don't know for sure, was partial upload successfull or not. So it's better to retry smaller amount to avoid extra reupload
+                    # We don't know for sure whether the partial upload was
+                    # successful or not, so it's better to retry a smaller
+                    # amount to avoid an extra reupload
                     log.Info('dpbx: sleeping a bit before chunk retry')
                     time.sleep(30)
                     current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE / 5
@@ -298,7 +325,8 @@
 
         log.Debug('dpbx,files_download(%s)' % remote_path)
         res_metadata, http_fd = self.api_client.files_download(remote_path)
-        log.Debug('dpbx,files_download(%s): %s, %s' % (remote_path, res_metadata, http_fd))
+        log.Debug('dpbx,files_download(%s): %s, %s' % (remote_path, res_metadata,
+                                                       http_fd))
         file_size = res_metadata.size
         to_fd = None
         progress.report_transfer(0, file_size)
@@ -313,11 +341,12 @@
                 to_fd.close()
             http_fd.close()
 
-        # It's different from _query() check because we're not querying metadata again.
-        # Since this check is free, it's better to have it here
+        # It's different from _query() check because we're not querying metadata
+        # again. Since this check is free, it's better to have it here
         local_size = os.path.getsize(local_path.name)
         if local_size != file_size:
-            raise BackendException("dpbx: wrong file size: %d (expected: %d)" % (local_size, file_size))
+            raise BackendException("dpbx: wrong file size: %d (expected: %d)" %
+                                   (local_size, file_size))
 
         local_path.setdata()
 

=== modified file 'duplicity/backends/gdocsbackend.py'
--- duplicity/backends/gdocsbackend.py	2015-05-31 14:12:59 +0000
+++ duplicity/backends/gdocsbackend.py	2016-06-29 22:46:17 +0000
@@ -139,11 +139,12 @@
                 answer = raw_input('Answer to the challenge? ')
             self._authorize(email, password, challenge.captcha_token, answer)
         except gdata.client.BadAuthentication:
-            raise BackendException('Invalid user credentials given. Be aware that accounts '
-                                   'that use 2-step verification require creating an application specific '
-                                   'access code for using this Duplicity backend. Follow the instruction in '
-                                   'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
-                                   'and create your application-specific password to run duplicity backups.')
+            raise BackendException(
+                'Invalid user credentials given. Be aware that accounts '
+                'that use 2-step verification require creating an application specific '
+                'access code for using this Duplicity backend. Follow the instruction in '
+                'http://www.google.com/support/accounts/bin/static.py?page=guide.cs&guide=1056283&topic=1056286 '
+                'and create your application-specific password to run duplicity backups.')
 
     def _fetch_entries(self, folder_id, type, title=None):
         # Build URI.

=== modified file 'duplicity/backends/lftpbackend.py'
--- duplicity/backends/lftpbackend.py	2016-03-04 10:17:19 +0000
+++ duplicity/backends/lftpbackend.py	2016-06-29 22:46:17 +0000
@@ -105,7 +105,8 @@
 
         # save config into a reusable temp file
         self.tempfile, self.tempname = tempdir.default().mkstemp()
-        os.write(self.tempfile, "set ssl:verify-certificate " + ("false" if globals.ssl_no_check_certificate else "true") + "\n")
+        os.write(self.tempfile, "set ssl:verify-certificate " +
+                 ("false" if globals.ssl_no_check_certificate else "true") + "\n")
         if self.cacert_file:
             os.write(self.tempfile, "set ssl:ca-file " + cmd_quote(self.cacert_file) + "\n")
         if globals.ssl_cacert_path:

=== modified file 'duplicity/backends/multibackend.py'
--- duplicity/backends/multibackend.py	2016-02-18 16:28:31 +0000
+++ duplicity/backends/multibackend.py	2016-06-29 22:46:17 +0000
@@ -36,7 +36,8 @@
 
 
 class MultiBackend(duplicity.backend.Backend):
-    """Store files across multiple remote stores. URL is a path to a local file containing URLs/other config defining the remote store"""
+    """Store files across multiple remote stores. URL is a path to a local file
+    containing URLs/other config defining the remote store"""
 
     # the stores we are managing
     __stores = []

=== modified file 'duplicity/backends/par2backend.py'
--- duplicity/backends/par2backend.py	2016-06-01 17:02:59 +0000
+++ duplicity/backends/par2backend.py	2016-06-29 22:46:17 +0000
@@ -117,7 +117,9 @@
             par2file = par2temp.append(remote_filename + '.par2')
             self.wrapped_backend._get(par2file.get_filename(), par2file)
 
-            par2verify = 'par2 v %s %s %s' % (self.common_options, par2file.get_canonical(), local_path_temp.get_canonical())
+            par2verify = 'par2 v %s %s %s' % (self.common_options,
+                                              par2file.get_canonical(),
+                                              local_path_temp.get_canonical())
             out, returncode = pexpect.run(par2verify, None, True)
 
             if returncode:
@@ -129,7 +131,9 @@
                     file = par2temp.append(filename)
                     self.wrapped_backend._get(filename, file)
 
-                par2repair = 'par2 r %s %s %s' % (self.common_options, par2file.get_canonical(), local_path_temp.get_canonical())
+                par2repair = 'par2 r %s %s %s' % (self.common_options,
+                                                  par2file.get_canonical(),
+                                                  local_path_temp.get_canonical())
                 out, returncode = pexpect.run(par2repair, None, True)
 
                 if returncode:

=== modified file 'duplicity/backends/pydrivebackend.py'
--- duplicity/backends/pydrivebackend.py	2016-04-18 14:10:25 +0000
+++ duplicity/backends/pydrivebackend.py	2016-06-29 22:46:17 +0000
@@ -52,7 +52,9 @@
         if 'GOOGLE_DRIVE_ACCOUNT_KEY' in os.environ:
             account_key = os.environ['GOOGLE_DRIVE_ACCOUNT_KEY']
             if self.oldClient:
-                credentials = SignedJwtAssertionCredentials(parsed_url.username + '@' + parsed_url.hostname, account_key,
+                credentials = SignedJwtAssertionCredentials(parsed_url.username +
+                                                            '@' + parsed_url.hostname,
+                                                            account_key,
                                                             scopes='https://www.googleapis.com/auth/drive')
             else:
                 signer = crypt.Signer.from_string(account_key)
@@ -65,7 +67,9 @@
             gauth = GoogleAuth(settings_file=os.environ['GOOGLE_DRIVE_SETTINGS'])
             gauth.CommandLineAuth()
         else:
-            raise BackendException('GOOGLE_DRIVE_ACCOUNT_KEY or GOOGLE_DRIVE_SETTINGS environment variable not set. Please read the manpage to fix.')
+            raise BackendException(
+                'GOOGLE_DRIVE_ACCOUNT_KEY or GOOGLE_DRIVE_SETTINGS environment '
+                'variable not set. Please read the manpage to fix.')
         self.drive = GoogleDrive(gauth)
 
         # Dirty way to find root folder id
@@ -82,10 +86,14 @@
         for folder_name in folder_names:
             if not folder_name:
                 continue
-            file_list = self.drive.ListFile({'q': "'" + parent_folder_id + "' in parents and trashed=false"}).GetList()
-            folder = next((item for item in file_list if item['title'] == folder_name and item['mimeType'] == 'application/vnd.google-apps.folder'), None)
+            file_list = self.drive.ListFile({'q': "'" + parent_folder_id +
+                                                  "' in parents and trashed=false"}).GetList()
+            folder = next((item for item in file_list if item['title'] == folder_name and
+                           item['mimeType'] == 'application/vnd.google-apps.folder'), None)
             if folder is None:
-                folder = self.drive.CreateFile({'title': folder_name, 'mimeType': "application/vnd.google-apps.folder", 'parents': [{'id': parent_folder_id}]})
+                folder = self.drive.CreateFile({'title': folder_name,
+                                                'mimeType': "application/vnd.google-apps.folder",
+                                                'parents': [{'id': parent_folder_id}]})
                 folder.Upload()
             parent_folder_id = folder['id']
         self.folder = parent_folder_id
@@ -102,14 +110,16 @@
                 if drive_file['title'] == filename and not drive_file['labels']['trashed']:
                     for parent in drive_file['parents']:
                         if parent['id'] == self.folder:
-                            log.Info("PyDrive backend: found file '%s' with id %s in ID cache" % (filename, file_id))
+                            log.Info("PyDrive backend: found file '%s' with id %s in ID cache" %
+                                     (filename, file_id))
                             return drive_file
             except ApiRequestError as error:
                 # A 404 occurs if the ID is no longer valid
                 if error.args[0].resp.status != 404:
                     raise
             # If we get here, the cache entry is invalid
-            log.Info("PyDrive backend: invalidating '%s' (previously ID %s) from ID cache" % (filename, file_id))
+            log.Info("PyDrive backend: invalidating '%s' (previously ID %s) from ID cache" %
+                     (filename, file_id))
             del self.id_cache[filename]
 
         # Not found in the cache, so use directory listing. This is less
@@ -122,9 +132,11 @@
         elif flist:
             file_id = flist[0]['id']
             self.id_cache[filename] = flist[0]['id']
-            log.Info("PyDrive backend: found file '%s' with id %s on server, adding to cache" % (filename, file_id))
+            log.Info("PyDrive backend: found file '%s' with id %s on server, "
+                     "adding to cache" % (filename, file_id))
             return flist[0]
-        log.Info("PyDrive backend: file '%s' not found in cache or on server" % (filename,))
+        log.Info("PyDrive backend: file '%s' not found in cache or on server" %
+                 (filename,))
         return None
 
     def id_by_name(self, filename):
@@ -138,7 +150,9 @@
         drive_file = self.file_by_name(remote_filename)
         if drive_file is None:
             # No existing file, make a new one
-            drive_file = self.drive.CreateFile({'title': remote_filename, 'parents': [{"kind": "drive#fileLink", "id": self.folder}]})
+            drive_file = self.drive.CreateFile({'title': remote_filename,
+                                                'parents': [{"kind": "drive#fileLink",
+                                                             "id": self.folder}]})
             log.Info("PyDrive backend: creating new file '%s'" % (remote_filename,))
         else:
             log.Info("PyDrive backend: replacing existing file '%s' with id '%s'" % (

=== modified file 'duplicity/backends/pyrax_identity/hubic.py'
--- duplicity/backends/pyrax_identity/hubic.py	2015-01-01 13:07:31 +0000
+++ duplicity/backends/pyrax_identity/hubic.py	2016-06-29 22:46:17 +0000
@@ -83,7 +83,8 @@
                 err = {}
 
             raise exc.AuthenticationFailed("Unable to get oauth access token, "
-                                           "wrong client_id or client_secret ? (%s)" % str(err))
+                                           "wrong client_id or client_secret ? (%s)" %
+                                           str(err))
 
         oauth_token = r.json()
 
@@ -98,7 +99,9 @@
             with open(TOKENS_FILE, 'wb') as configfile:
                 config.write(configfile)
         else:
-            raise exc.AuthenticationFailed("Unable to get oauth access token, wrong client_id or client_secret ? (%s)" % str(err))
+            raise exc.AuthenticationFailed(
+                "Unable to get oauth access token, wrong client_id or client_secret ? (%s)" %
+                str(err))
 
         if oauth_token['refresh_token'] is not None:
             config.set("hubic", "refresh_token", oauth_token['refresh_token'])
@@ -160,12 +163,16 @@
                     except:
                         err = {}
 
-                    raise exc.AuthenticationFailed("Unable to get oauth access token, wrong client_id or client_secret ? (%s)" % str(err))
+                    raise exc.AuthenticationFailed(
+                        "Unable to get oauth access token, wrong client_id or client_secret ? (%s)" %
+                        str(err))
             else:
                 success = True
 
         if not success:
-            raise exc.AuthenticationFailed("All the attempts failed to get the refresh token: status_code = 509: Bandwidth Limit Exceeded")
+            raise exc.AuthenticationFailed(
+                "All the attempts failed to get the refresh token: "
+                "status_code = 509: Bandwidth Limit Exceeded")
 
         oauth_token = r.json()
 
@@ -203,14 +210,17 @@
                 oauth = lxml_html.document_fromstring(r.content).xpath('//input[@name="oauth"]')
                 oauth = oauth[0].value if oauth else None
             else:
-                oauth = re.search(r'<input\s+[^>]*name=[\'"]?oauth[\'"]?\s+[^>]*value=[\'"]?(\d+)[\'"]?>', r.content)
+                oauth = re.search(
+                    r'<input\s+[^>]*name=[\'"]?oauth[\'"]?\s+[^>]*value=[\'"]?(\d+)[\'"]?>',
+                    r.content)
                 oauth = oauth.group(1) if oauth else None
 
             if not oauth:
                 raise exc.AuthenticationFailed("Unable to get oauth_id from authorization page")
 
             if self._email is None or self._password is None:
-                raise exc.AuthenticationFailed("Cannot retrieve email and/or password. Please run expresslane-hubic-setup.sh")
+                raise exc.AuthenticationFailed("Cannot retrieve email and/or password. "
+                                               "Please run expresslane-hubic-setup.sh")
 
             r = requests.post(
                 OAUTH_ENDPOINT + 'auth/',
@@ -230,7 +240,8 @@
                 query = urlparse.urlsplit(r.headers['location']).query
                 code = dict(urlparse.parse_qsl(query))['code']
             except:
-                raise exc.AuthenticationFailed("Unable to authorize client_id, invalid login/password ?")
+                raise exc.AuthenticationFailed("Unable to authorize client_id, "
+                                               "invalid login/password ?")
 
             oauth_token = self._get_access_token(code)
 

=== modified file 'duplicity/backends/ssh_paramiko_backend.py'
--- duplicity/backends/ssh_paramiko_backend.py	2015-03-22 12:31:27 +0000
+++ duplicity/backends/ssh_paramiko_backend.py	2016-06-29 22:46:17 +0000
@@ -42,18 +42,22 @@
 
 class SSHParamikoBackend(duplicity.backend.Backend):
     """This backend accesses files using the sftp or scp protocols.
-    It does not need any local client programs, but an ssh server and the sftp program must be installed on the remote
-    side (or with scp, the programs scp, ls, mkdir, rm and a POSIX-compliant shell).
+    It does not need any local client programs, but an ssh server and the sftp
+    program must be installed on the remote side (or with scp, the programs
+    scp, ls, mkdir, rm and a POSIX-compliant shell).
 
-    Authentication keys are requested from an ssh agent if present, then ~/.ssh/id_rsa/dsa are tried.
-    If -oIdentityFile=path is present in --ssh-options, then that file is also tried.
-    The passphrase for any of these keys is taken from the URI or FTP_PASSWORD.
-    If none of the above are available, password authentication is attempted (using the URI or FTP_PASSWORD).
+    Authentication keys are requested from an ssh agent if present, then
+    ~/.ssh/id_rsa/dsa are tried. If -oIdentityFile=path is present in
+    --ssh-options, then that file is also tried. The passphrase for any of
+    these keys is taken from the URI or FTP_PASSWORD. If none of the above are
+    available, password authentication is attempted (using the URI or
+    FTP_PASSWORD).
 
     Missing directories on the remote side will be created.
 
-    If scp is active then all operations on the remote side require passing arguments through a shell,
-    which introduces unavoidable quoting issues: directory and file names that contain single quotes will not work.
+    If scp is active then all operations on the remote side require passing
+    arguments through a shell, which introduces unavoidable quoting issues:
+    directory and file names that contain single quotes will not work.
     This problem does not exist with sftp.
     """
     def __init__(self, parsed_url):
@@ -68,8 +72,9 @@
             self.remote_dir = '.'
 
         # lazily import paramiko when we need it
-        # debian squeeze's paramiko is a bit old, so we silence randompool depreciation warning
-        # note also: passphrased private keys work with squeeze's paramiko only if done with DES, not AES
+        # debian squeeze's paramiko is a bit old, so we silence randompool
+        # deprecation warning. Note also: passphrased private keys work with
+        # squeeze's paramiko only if done with DES, not AES
         import warnings
         warnings.simplefilter("ignore")
         import paramiko
@@ -80,19 +85,23 @@
             Policy for showing a yes/no prompt and adding the hostname and new
             host key to the known host file accordingly.
 
-            This class simply extends the AutoAddPolicy class with a yes/no prompt.
+            This class simply extends the AutoAddPolicy class with a yes/no
+            prompt.
             """
             def missing_host_key(self, client, hostname, key):
                 fp = hexlify(key.get_fingerprint())
                 fingerprint = ':'.join(a + b for a, b in zip(fp[::2], fp[1::2]))
                 question = """The authenticity of host '%s' can't be established.
 %s key fingerprint is %s.
-Are you sure you want to continue connecting (yes/no)? """ % (hostname, key.get_name().upper(), fingerprint)
+Are you sure you want to continue connecting (yes/no)? """ % (hostname,
+                                                              key.get_name().upper(),
+                                                              fingerprint)
                 while True:
                     sys.stdout.write(question)
                     choice = raw_input().lower()
                     if choice in ['yes', 'y']:
-                        paramiko.AutoAddPolicy.missing_host_key(self, client, hostname, key)
+                        paramiko.AutoAddPolicy.missing_host_key(self, client,
+                                                                hostname, key)
                         return
                     elif choice in ['no', 'n']:
                         raise AuthenticityException(hostname)
@@ -101,7 +110,9 @@
 
         class AuthenticityException (paramiko.SSHException):
             def __init__(self, hostname):
-                paramiko.SSHException.__init__(self, 'Host key verification for server %s failed.' % hostname)
+                paramiko.SSHException.__init__(self,
+                                               'Host key verification for server %s failed.' %
+                                               hostname)
 
         self.client = paramiko.SSHClient()
         self.client.set_missing_host_key_policy(AgreedAddPolicy())
@@ -115,7 +126,8 @@
         ours.addHandler(dest)
 
         # ..and the duplicity levels are neither linear,
-        # nor are the names compatible with python logging, eg. 'NOTICE'...WAAAAAH!
+        # nor are the names compatible with python logging,
+        # eg. 'NOTICE'...WAAAAAH!
         plevel = logging.getLogger("duplicity").getEffectiveLevel()
         if plevel <= 1:
             wanted = logging.DEBUG
@@ -135,7 +147,8 @@
             if os.path.isfile("/etc/ssh/ssh_known_hosts"):
                 self.client.load_system_host_keys("/etc/ssh/ssh_known_hosts")
         except Exception as e:
-            raise BackendException("could not load /etc/ssh/ssh_known_hosts, maybe corrupt?")
+            raise BackendException("could not load /etc/ssh/ssh_known_hosts, "
+                                   "maybe corrupt?")
         try:
             # use load_host_keys() to signal it's writable to paramiko
             # load if file exists or add filename to create it if needed
@@ -145,7 +158,8 @@
             else:
                 self.client._host_keys_filename = file
         except Exception as e:
-            raise BackendException("could not load ~/.ssh/known_hosts, maybe corrupt?")
+            raise BackendException("could not load ~/.ssh/known_hosts, "
+                                   "maybe corrupt?")
 
         """ the next block reorganizes all host parameters into a
         dictionary like SSHConfig does. this dictionary 'self.config'
@@ -155,9 +169,11 @@
         """
         self.config = {'hostname': parsed_url.hostname}
         # get system host config entries
-        self.config.update(self.gethostconfig('/etc/ssh/ssh_config', parsed_url.hostname))
+        self.config.update(self.gethostconfig('/etc/ssh/ssh_config',
+                                              parsed_url.hostname))
         # update with user's config file
-        self.config.update(self.gethostconfig('~/.ssh/config', parsed_url.hostname))
+        self.config.update(self.gethostconfig('~/.ssh/config',
+                                              parsed_url.hostname))
         # update with url values
         # username from url
         if parsed_url.username:
@@ -174,7 +190,8 @@
         else:
             self.config.update({'port': 22})
         # parse ssh options for alternative ssh private key, identity file
-        m = re.search("^(?:.+\s+)?(?:-oIdentityFile=|-i\s+)(([\"'])([^\\2]+)\\2|[\S]+).*", globals.ssh_options)
+        m = re.search("^(?:.+\s+)?(?:-oIdentityFile=|-i\s+)(([\"'])([^\\2]+)\\2|[\S]+).*",
+                      globals.ssh_options)
         if (m is not None):
             keyfilename = m.group(3) if m.group(3) else m.group(1)
             self.config['identityfile'] = keyfilename
@@ -218,7 +235,8 @@
                 self.config['port'], e))
         self.client.get_transport().set_keepalive((int)(globals.timeout / 2))
 
-        self.scheme = duplicity.backend.strip_prefix(parsed_url.scheme, 'paramiko')
+        self.scheme = duplicity.backend.strip_prefix(parsed_url.scheme,
+                                                     'paramiko')
         self.use_scp = (self.scheme == 'scp')
 
         # scp or sftp?
@@ -251,13 +269,16 @@
                             try:
                                 self.sftp.mkdir(d)
                             except Exception as e:
-                                raise BackendException("sftp mkdir %s failed: %s" % (self.sftp.normalize(".") + "/" + d, e))
+                                raise BackendException("sftp mkdir %s failed: %s" %
+                                                       (self.sftp.normalize(".") + "/" + d, e))
                         else:
-                            raise BackendException("sftp stat %s failed: %s" % (self.sftp.normalize(".") + "/" + d, e))
+                            raise BackendException("sftp stat %s failed: %s" %
+                                                   (self.sftp.normalize(".") + "/" + d, e))
                     try:
                         self.sftp.chdir(d)
                     except Exception as e:
-                        raise BackendException("sftp chdir to %s failed: %s" % (self.sftp.normalize(".") + "/" + d, e))
+                        raise BackendException("sftp chdir to %s failed: %s" %
+                                               (self.sftp.normalize(".") + "/" + d, e))
 
     def _put(self, source_path, remote_filename):
         if self.use_scp:
@@ -265,16 +286,19 @@
             try:
                 chan = self.client.get_transport().open_session()
                 chan.settimeout(globals.timeout)
-                chan.exec_command("scp -t '%s'" % self.remote_dir)  # scp in sink mode uses the arg as base directory
+                # scp in sink mode uses the arg as base directory
+                chan.exec_command("scp -t '%s'" % self.remote_dir)
             except Exception as e:
                 raise BackendException("scp execution failed: %s" % e)
-            # scp protocol: one 0x0 after startup, one after the Create meta, one after saving
-            # if there's a problem: 0x1 or 0x02 and some error text
+            # scp protocol: one 0x0 after startup, one after the Create meta,
+            # one after saving. If there's a problem: 0x1 or 0x02 and some error
+            # text
             response = chan.recv(1)
             if (response != "\0"):
                 raise BackendException("scp remote error: %s" % chan.recv(-1))
             fstat = os.stat(source_path.name)
-            chan.send('C%s %d %s\n' % (oct(fstat.st_mode)[-4:], fstat.st_size, remote_filename))
+            chan.send('C%s %d %s\n' % (oct(fstat.st_mode)[-4:], fstat.st_size,
+                                       remote_filename))
             response = chan.recv(1)
             if (response != "\0"):
                 raise BackendException("scp remote error: %s" % chan.recv(-1))
@@ -292,7 +316,8 @@
             try:
                 chan = self.client.get_transport().open_session()
                 chan.settimeout(globals.timeout)
-                chan.exec_command("scp -f '%s/%s'" % (self.remote_dir, remote_filename))
+                chan.exec_command("scp -f '%s/%s'" % (self.remote_dir,
+                                                      remote_filename))
             except Exception as e:
                 raise BackendException("scp execution failed: %s" % e)
 
@@ -300,7 +325,8 @@
             msg = chan.recv(-1)
             m = re.match(r"C([0-7]{4})\s+(\d+)\s+(\S.*)$", msg)
             if (m is None or m.group(3) != remote_filename):
-                raise BackendException("scp get %s failed: incorrect response '%s'" % (remote_filename, msg))
+                raise BackendException("scp get %s failed: incorrect response '%s'" %
+                                       (remote_filename, msg))
             chan.recv(1)  # dispose of the newline trailing the C message
 
             size = int(m.group(2))
@@ -321,7 +347,8 @@
 
             msg = chan.recv(1)  # check the final status
             if msg != '\0':
-                raise BackendException("scp get %s failed: %s" % (remote_filename, chan.recv(-1)))
+                raise BackendException("scp get %s failed: %s" % (remote_filename,
+                                                                  chan.recv(-1)))
             f.close()
             chan.send('\0')  # send final done indicator
             chan.close()
@@ -332,7 +359,8 @@
         # In scp mode unavoidable quoting issues will make this fail if the
         # directory name contains single quotes.
         if self.use_scp:
-            output = self.runremote("ls -1 '%s'" % self.remote_dir, False, "scp dir listing ")
+            output = self.runremote("ls -1 '%s'" % self.remote_dir, False,
+                                    "scp dir listing ")
             return output.splitlines()
         else:
             return self.sftp.listdir()
@@ -341,13 +369,15 @@
         # In scp mode unavoidable quoting issues will cause failures if
         # filenames containing single quotes are encountered.
         if self.use_scp:
-            self.runremote("rm '%s/%s'" % (self.remote_dir, filename), False, "scp rm ")
+            self.runremote("rm '%s/%s'" % (self.remote_dir, filename), False,
+                           "scp rm ")
         else:
             self.sftp.remove(filename)
 
     def runremote(self, cmd, ignoreexitcode=False, errorprefix=""):
-        """small convenience function that opens a shell channel, runs remote command and returns
-        stdout of command. throws an exception if exit code!=0 and not ignored"""
+        """small convenience function that opens a shell channel, runs remote
+        command and returns stdout of command. throws an exception if exit
+        code!=0 and not ignored"""
         try:
             chan = self.client.get_transport().open_session()
             chan.settimeout(globals.timeout)
@@ -357,7 +387,8 @@
         output = chan.recv(-1)
         res = chan.recv_exit_status()
         if (res != 0 and not ignoreexitcode):
-            raise BackendException("%sfailed(%d): %s" % (errorprefix, res, chan.recv_stderr(4096)))
+            raise BackendException("%sfailed(%d): %s" % (errorprefix, res,
+                                                         chan.recv_stderr(4096)))
         return output
 
     def gethostconfig(self, file, host):

=== modified file 'duplicity/backends/webdavbackend.py'
--- duplicity/backends/webdavbackend.py	2016-03-04 10:17:19 +0000
+++ duplicity/backends/webdavbackend.py	2016-06-29 22:46:17 +0000
@@ -76,7 +76,8 @@
 
             # check if file is accessible (libssl errors are not very detailed)
             if self.cacert_file and not os.access(self.cacert_file, os.R_OK):
-                raise FatalBackendException(_("Cacert database file '%s' is not readable.") % self.cacert_file)
+                raise FatalBackendException(_("Cacert database file '%s' is not readable.") %
+                                            self.cacert_file)
 
         def connect(self):
             # create new socket
@@ -88,19 +89,25 @@
 
             # python 2.7.9+ supports default system certs now
             if "create_default_context" in dir(ssl):
-                context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=self.cacert_file, capath=globals.ssl_cacert_path)
+                context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH,
+                                                     cafile=self.cacert_file,
+                                                     capath=globals.ssl_cacert_path)
                 self.sock = context.wrap_socket(sock, server_hostname=self.host)
             # the legacy way needing a cert file
             else:
                 if globals.ssl_cacert_path:
-                    raise FatalBackendException(_("Option '--ssl-cacert-path' is not supported with python 2.7.8 and below."))
+                    raise FatalBackendException(
+                        _("Option '--ssl-cacert-path' is not supported "
+                          "with python 2.7.8 and below."))
 
                 if not self.cacert_file:
                     raise FatalBackendException(_("""\
-For certificate verification with python 2.7.8 or earlier a cacert database file is needed in one of these locations: %s
+For certificate verification with python 2.7.8 or earlier a cacert database
+file is needed in one of these locations: %s
 Hints:
   Consult the man page, chapter 'SSL Certificate Verification'.
-  Consider using the options --ssl-cacert-file, --ssl-no-check-certificate .""") % ", ".join(self.cacert_candidates))
+  Consider using the options --ssl-cacert-file, --ssl-no-check-certificate .""") %
+                                                ", ".join(self.cacert_candidates))
 
                 # wrap the socket in ssl using verification
                 self.sock = ssl.wrap_socket(sock,
@@ -113,7 +120,8 @@
                 return httplib.HTTPSConnection.request(self, *args, **kwargs)
             except ssl.SSLError as e:
                 # encapsulate ssl errors
-                raise BackendException("SSL failed: %s" % util.uexc(e), log.ErrorCode.backend_error)
+                raise BackendException("SSL failed: %s" % util.uexc(e),
+                                       log.ErrorCode.backend_error)
 
 
 class WebDAVBackend(duplicity.backend.Backend):
@@ -140,7 +148,8 @@
         self.directory = self.sanitize_path(parsed_url.path)
 
         log.Info(_("Using WebDAV protocol %s") % (globals.webdav_proto,))
-        log.Info(_("Using WebDAV host %s port %s") % (parsed_url.hostname, parsed_url.port))
+        log.Info(_("Using WebDAV host %s port %s") % (parsed_url.hostname,
+                                                      parsed_url.port))
         log.Info(_("Using WebDAV directory %s") % (self.directory,))
 
         self.conn = None
@@ -292,7 +301,8 @@
         hostname = u.port and "%s:%s" % (u.hostname, u.port) or u.hostname
         dummy_url = "%s://%s%s" % (scheme, hostname, path)
         dummy_req = CustomMethodRequest(self.conn._method, dummy_url)
-        auth_string = self.digest_auth_handler.get_authorization(dummy_req, self.digest_challenge)
+        auth_string = self.digest_auth_handler.get_authorization(dummy_req,
+                                                                 self.digest_challenge)
         return 'Digest %s' % auth_string
 
     def _list(self):
@@ -351,7 +361,8 @@
 
                 res = self.request("MKCOL", d)
                 if res.status != 201:
-                    raise BackendException(_("WebDAV MKCOL %s failed: %s %s") % (d, res.status, res.reason))
+                    raise BackendException(_("WebDAV MKCOL %s failed: %s %s") %
+                                           (d, res.status, res.reason))
 
     def taste_href(self, href):
         """
@@ -400,14 +411,16 @@
                 # data=response.read()
                 target_file.write(response.read())
                 # import hashlib
-                # log.Info("WebDAV GOT %s bytes with md5=%s" % (len(data),hashlib.md5(data).hexdigest()) )
+                # log.Info("WebDAV GOT %s bytes with md5=%s" %
+                # (len(data),hashlib.md5(data).hexdigest()) )
                 assert not target_file.close()
                 response.close()
             else:
                 status = response.status
                 reason = response.reason
                 response.close()
-                raise BackendException(_("WebDAV GET Bad status code %s reason %s.") % (status, reason))
+                raise BackendException(_("WebDAV GET Bad status code %s reason %s.") %
+                                       (status, reason))
         except Exception as e:
             raise e
         finally:
@@ -428,7 +441,8 @@
                 status = response.status
                 reason = response.reason
                 response.close()
-                raise BackendException(_("WebDAV PUT Bad status code %s reason %s.") % (status, reason))
+                raise BackendException(_("WebDAV PUT Bad status code %s reason %s.") %
+                                       (status, reason))
         except Exception as e:
             raise e
         finally:
@@ -447,7 +461,8 @@
                 status = response.status
                 reason = response.reason
                 response.close()
-                raise BackendException(_("WebDAV DEL Bad status code %s reason %s.") % (status, reason))
+                raise BackendException(_("WebDAV DEL Bad status code %s reason %s.") %
+                                       (status, reason))
         except Exception as e:
             raise e
         finally:

=== modified file 'duplicity/collections.py'
--- duplicity/collections.py	2016-06-24 15:57:28 +0000
+++ duplicity/collections.py	2016-06-29 22:46:17 +0000
@@ -713,7 +713,8 @@
         backup_chains = self.get_sorted_chains(backup_chains)
         self.all_backup_chains = backup_chains
 
-        assert len(backup_chains) == len(self.all_backup_chains), "get_sorted_chains() did something more than re-ordering"
+        assert len(backup_chains) == len(self.all_backup_chains), \
+            "get_sorted_chains() did something more than re-ordering"
 
         local_sig_chains, self.local_orphaned_sig_names = \
             self.get_signature_chains(True)
@@ -1007,7 +1008,11 @@
             # no chains are old enough, give oldest and warn user
             oldest = self.all_sig_chains[0]
             if time < oldest.start_time:
-                log.Warn(_("No signature chain for the requested time.  Using oldest available chain, starting at time %s.") % dup_time.timetopretty(oldest.start_time), log.WarningCode.no_sig_for_time, dup_time.timetostring(oldest.start_time))
+                log.Warn(_("No signature chain for the requested time. "
+                           "Using oldest available chain, starting at time %s.") %
+                         dup_time.timetopretty(oldest.start_time),
+                         log.WarningCode.no_sig_for_time,
+                         dup_time.timetostring(oldest.start_time))
             return oldest
 
     def get_extraneous(self, extra_clean):

=== modified file 'duplicity/file_naming.py'
--- duplicity/file_naming.py	2016-03-08 14:08:05 +0000
+++ duplicity/file_naming.py	2016-06-29 22:46:17 +0000
@@ -220,18 +220,23 @@
         assert not (volume_number and part_string)
         if type == "full-sig":
             if globals.short_filenames:
-                return (globals.file_prefix + globals.file_prefix_signature + "dfs.%s.st%s%s" %
+                return (globals.file_prefix + globals.file_prefix_signature +
+                        "dfs.%s.st%s%s" %
                         (to_base36(dup_time.curtime), part_string, suffix))
             else:
-                return (globals.file_prefix + globals.file_prefix_signature + "duplicity-full-signatures.%s.sigtar%s%s" %
+                return (globals.file_prefix + globals.file_prefix_signature +
+                        "duplicity-full-signatures.%s.sigtar%s%s" %
                         (dup_time.curtimestr, part_string, suffix))
         elif type == "new-sig":
             if globals.short_filenames:
-                return (globals.file_prefix + globals.file_prefix_signature + "dns.%s.%s.st%s%s" %
-                        (to_base36(dup_time.prevtime), to_base36(dup_time.curtime),
+                return (globals.file_prefix + globals.file_prefix_signature +
+                        "dns.%s.%s.st%s%s" %
+                        (to_base36(dup_time.prevtime),
+                         to_base36(dup_time.curtime),
                          part_string, suffix))
             else:
-                return (globals.file_prefix + globals.file_prefix_signature + "duplicity-new-signatures.%s.to.%s.sigtar%s%s" %
+                return (globals.file_prefix + globals.file_prefix_signature +
+                        "duplicity-new-signatures.%s.to.%s.sigtar%s%s" %
                         (dup_time.prevtimestr, dup_time.curtimestr,
                          part_string, suffix))
     else:

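For orientation, the rewrapped builders above produce signature filenames of
two shapes. A hypothetical example (the timestamp, base36 value and suffix are
invented; the configurable prefixes default to empty strings):

    # Short form, used when short_filenames is set (base36 timestamp):
    short_name = "dfs.%s.st%s%s" % ("sxyz1", "", ".gpg")
    # Long form, with the full timestamp string:
    long_name = ("duplicity-full-signatures.%s.sigtar%s%s" %
                 ("20160629T224617Z", "", ".gpg"))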
=== modified file 'duplicity/manifest.py'
--- duplicity/manifest.py	2016-06-24 15:57:28 +0000
+++ duplicity/manifest.py	2016-06-29 22:46:17 +0000
@@ -99,7 +99,8 @@
                        "Current directory: %s\n"
                        "Previous directory: %s") % (globals.local_path.name, self.local_dirname)  # @UndefinedVariable
             code = log.ErrorCode.source_dir_mismatch
-            code_extra = "%s %s" % (util.escape(globals.local_path.name), util.escape(self.local_dirname))  # @UndefinedVariable
+            code_extra = "%s %s" % (util.escape(globals.local_path.name),
+                                    util.escape(self.local_dirname))  # @UndefinedVariable
         else:
             return
 

=== modified file 'duplicity/progress.py'
--- duplicity/progress.py	2015-01-31 23:30:49 +0000
+++ duplicity/progress.py	2016-06-29 22:46:17 +0000
@@ -67,7 +67,8 @@
                 snapshot = pickle.load(progressfd)
                 progressfd.close()
             except:
-                log.Warn("Warning, cannot read stored progress information from previous backup", log.WarningCode.cannot_stat)
+                log.Warn("Warning, cannot read stored progress information from previous backup",
+                         log.WarningCode.cannot_stat)
                 snapshot = Snapshot()
         # Reached here no cached data found or wrong marshalling
         return snapshot
@@ -204,17 +205,20 @@
 
             """
             Combine variables for progress estimation
-            Fit a smoothed curve that covers the most common data density distributions, aiming for a large number of incremental changes.
+            Fit a smoothed curve that covers the most common data density distributions,
+            aiming for a large number of incremental changes.
             The computation is:
-                Use 50% confidence interval lower bound during first half of the progression. Conversely, use 50% C.I. upper bound during
-                the second half. Scale it to the changes/total ratio
+                Use the 50% confidence interval lower bound during the first half of the
+                progression and, conversely, the 50% C.I. upper bound during the second
+                half, scaled to the changes/total ratio.
             """
             self.current_estimation = float(changes) / float(total_changes) * (
                 (self.change_mean_ratio - 0.67 * change_sigma) * (1.0 - self.current_estimation) +
                 (self.change_mean_ratio + 0.67 * change_sigma) * self.current_estimation
             )
             """
-            In case that we overpassed the 100%, drop the confidence and trust more the mean as the sigma may be large.
+            If the estimate overshoots 100%, drop the confidence interval and trust the
+            mean more, as the sigma may be large.
             """
             if self.current_estimation > 1.0:
                 self.current_estimation = float(changes) / float(total_changes) * (
@@ -228,15 +232,21 @@
                 self.current_estimation = self.change_mean_ratio * float(changes) / float(total_changes)
 
         """
-        Lastly, just cap it... nothing else we can do to approximate it better. Cap it to 99%, as the remaining 1% to 100% we reserve it
-        For the last step uploading of signature and manifests
+        Lastly, just cap it; there is nothing else we can do to approximate it better.
+        Cap it at 99%, reserving the remaining 1% for the final step of uploading
+        the signature and manifest files.
         """
-        self.progress_estimation = max(0.0, min(self.prev_estimation + (1.0 - self.prev_estimation) * self.current_estimation, 0.99))
+        self.progress_estimation = max(0.0, min(self.prev_estimation +
+                                                (1.0 - self.prev_estimation) *
+                                                self.current_estimation, 0.99))
 
         """
-        Estimate the time just as a projection of the remaining time, fit to a [(1 - x) / x] curve
+        Estimate the remaining time as a projection of the elapsed time, fit to a
+        [(1 - x) / x] curve
         """
-        self.elapsed_sum += elapsed  # As sum of timedeltas, so as to avoid clock skew in long runs (adding also microseconds)
+        # Accumulate as a sum of timedeltas (microseconds included)
+        # to avoid clock skew in long runs
+        self.elapsed_sum += elapsed
         projection = 1.0
         if self.progress_estimation > 0:
             projection = (1.0 - self.progress_estimation) / self.progress_estimation
@@ -250,7 +260,8 @@
         Compute Exponential Moving Average of speed as bytes/sec of the last 30 probes
         """
         if elapsed.total_seconds() > 0:
-            self.transfers.append(float(self.total_bytecount - self.last_total_bytecount) / float(elapsed.total_seconds()))
+            self.transfers.append(float(self.total_bytecount - self.last_total_bytecount) /
+                                  float(elapsed.total_seconds()))
         self.last_total_bytecount = self.total_bytecount
         if len(self.transfers) > 30:
             self.transfers.popleft()

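Read together, the progress.py hunks rewrap a single estimator. A
self-contained sketch of its arithmetic (the 0.67 sigma and 0.99 cap constants
are from the diff; class state becomes plain arguments here, and
deque(maxlen=30) stands in for the explicit popleft):

    from collections import deque

    def blend_estimation(changes, total_changes, change_mean_ratio,
                         change_sigma, prev_estimation, current_estimation):
        ratio = float(changes) / float(total_changes)
        # The 50% C.I. lower bound dominates early, the upper bound late.
        estimation = ratio * (
            (change_mean_ratio - 0.67 * change_sigma) * (1.0 - current_estimation) +
            (change_mean_ratio + 0.67 * change_sigma) * current_estimation)
        # Cap at 99%; the last 1% is reserved for signatures and manifests.
        return max(0.0, min(prev_estimation +
                            (1.0 - prev_estimation) * estimation, 0.99))

    # Remaining time follows a (1 - x) / x projection of the elapsed time:
    # at 25% done, roughly three times the elapsed time is still expected.
    progress = blend_estimation(250, 1000, 0.5, 0.1, 0.0, 0.5)
    projection = (1.0 - progress) / progress if progress > 0 else 1.0

    # Transfer speed averages a bounded window of the last 30 probes.
    transfers = deque(maxlen=30)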
=== modified file 'duplicity/tempdir.py'
--- duplicity/tempdir.py	2015-10-31 20:29:11 +0000
+++ duplicity/tempdir.py	2016-06-29 22:46:17 +0000
@@ -257,7 +257,8 @@
                 try:
                     os.rmdir(self.__dir)
                 except Exception:
-                    log.Warn(_("Cleanup of temporary directory %s failed - this is probably a bug.") % util.ufn(self.__dir))
+                    log.Warn(_("Cleanup of temporary directory %s failed - "
+                               "this is probably a bug.") % util.ufn(self.__dir))
                     pass
                 self.__pending = None
                 self.__dir = None

=== modified file 'testing/test_code.py'
--- testing/test_code.py	2015-12-23 15:37:06 +0000
+++ testing/test_code.py	2016-06-29 22:46:17 +0000
@@ -90,12 +90,12 @@
     def test_pep8(self):
         ignores = [
             "E402",  # module level import not at top of file
-            "E501",  # line too long
             "E731",  # do not assign a lambda expression, use a def
             "W503",  # line break before binary operator
         ]
         self.run_checker(["pep8",
                           "--ignore=" + ','.join(ignores),
+                          "--max-line-length=120",
                           os.path.join(_top_dir, 'duplicity'),
                           os.path.join(_top_dir, 'bin/duplicity'),
                           os.path.join(_top_dir, 'bin/rdiffdir')])


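The testing/test_code.py hunk above is the enforcement half of the branch:
E501 leaves the ignore list and pep8 gains --max-line-length=120, matching
tox.ini. In miniature, the wrapping idioms the other hunks rely on (names
below are illustrative, not duplicity code):

    # 1. Implicit concatenation of adjacent string literals keeps a
    #    translatable message in one call (tempdir.py, collections.py):
    message = ("Cleanup of temporary directory failed - "
               "this is probably a bug.")

    # 2. Parenthesised continuation splits a long expression
    #    (progress.py, webdav.py):
    numerator, denominator = 3.0, 4.0
    ratio = (numerator /
             denominator)

    # 3. Backslash continuation where parentheses would be clumsy
    #    (the collections.py assert):
    assert len(message) < 120, \
        "short enough for the new limit"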