[Merge] lp:~verb/duplicity/boto-gcs into lp:duplicity

Lee Verberne has proposed merging lp:~verb/duplicity/boto-gcs into lp:duplicity.

Requested reviews:
  duplicity-team (duplicity-team)
Related bugs:
  Bug #1030868 in Duplicity: "Support Google Cloud Storage"
  https://bugs.launchpad.net/duplicity/+bug/1030868

For more details, see:
https://code.launchpad.net/~verb/duplicity/boto-gcs/+merge/178399

These patches add support for Google Cloud Storage via the boto backend.

boto has supported GCS in interoperability mode for a few years now. This change takes advantage of boto's storage_uri abstraction layer, so the same backend code path serves both S3 and GCS.
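
For reviewers unfamiliar with that layer: boto.storage_uri() dispatches on the URI scheme, so the same calls work against S3 and GCS. A minimal sketch of the idea (bucket name and key are hypothetical, and interoperability credentials are assumed to be configured already):

import boto

# storage_uri() returns a BucketStorageUri; connect() yields a
# GSConnection for gs:// URIs and an S3Connection for s3:// URIs.
uri = boto.storage_uri('gs://my-bucket/some/prefix')
conn = uri.connect()

# new_key() hands back the provider-appropriate Key class, which is
# why the patch drops the direct import of boto.s3.key.Key.
key = uri.new_key()
key.key = 'some/prefix/hello.txt'
key.set_contents_from_string('hello from duplicity')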
-- 
https://code.launchpad.net/~verb/duplicity/boto-gcs/+merge/178399
Your team duplicity-team is requested to review the proposed merge of lp:~verb/duplicity/boto-gcs into lp:duplicity.
=== modified file 'bin/duplicity.1'
--- bin/duplicity.1	2013-04-27 14:10:11 +0000
+++ bin/duplicity.1	2013-08-02 23:03:25 +0000
@@ -16,7 +16,7 @@
 
 Some backends also require additional components (probably available as packages for your specific platform):
 .TP
-.BR "boto backend" " (S3 Amazon Web Services)"
+.BR "boto backend" " (S3 Amazon Web Services, Google Cloud Storage)"
 .B boto
 - http://github.com/boto/boto
 .TP
@@ -936,7 +936,7 @@
 it is permitted however.
 Consider setting the environment variable 
 .B FTP_PASSWORD 
-instead, which is used by most, if not all backends, regardless of it's name.
+instead, which is used by most, if not all backends, regardless of its name.
 .PP
 In protocols that support it, the path may be preceded by a single
 slash, '/path', to represent a relative path to the target home directory,
@@ -957,6 +957,10 @@
 .PP
 gdocs://user[:password]@other.host/some_dir
 .PP
+.BI "Google Cloud Storage"
+.br
+gs://bucket[/prefix]
+.PP
 hsi://user[:password]@other.host/some_dir
 .PP
 imap[s]://user[:password]@host.com[/from_address_prefix]
@@ -1363,6 +1367,25 @@
 or HTTP errors when trying to upload files to your newly created
 bucket. Give it a few minutes and the bucket should function normally.
 
+.SH A NOTE ON GOOGLE CLOUD STORAGE
+Support for Google Cloud Storage relies on its Interoperable Access,
+which must be enabled for your account.  Once enabled, you can generate
+Interoperable Storage Access Keys and pass them to duplicity via the
+.B GS_ACCESS_KEY_ID
+and
+.B GS_SECRET_ACCESS_KEY
+environment variables. Alternatively, you can run
+.B "gsutil config -a"
+to have the Google Cloud Storage utility populate the
+.B ~/.boto
+configuration file.
+.PP
+Enable Interoperable Access: 
+https://code.google.com/apis/console#:storage
+.br
+Create Access Keys:
+https://code.google.com/apis/console#:storage:legacy
+
 .SH A NOTE ON IMAP
 An IMAP account can be used as a target for the upload.  The userid may
 be specified and the password will be requested.
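
A quick way to sanity-check the credential path the new man page section describes (the key values and bucket name below are placeholders; boto falls back to the ~/.boto file written by "gsutil config -a" when the variables are unset):

import os
import boto

# boto's google provider reads these two variables, mirroring
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY on the S3 side.
os.environ['GS_ACCESS_KEY_ID'] = 'GOOGEXAMPLEKEYID'
os.environ['GS_SECRET_ACCESS_KEY'] = 'examplesecret'

# Listing the bucket exercises the same connect() path duplicity uses.
for key in boto.storage_uri('gs://my-bucket').get_bucket():
    print key.name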

=== modified file 'duplicity/backends/_boto_single.py'
--- duplicity/backends/_boto_single.py	2013-01-25 13:35:33 +0000
+++ duplicity/backends/_boto_single.py	2013-08-02 23:03:25 +0000
@@ -50,8 +50,6 @@
         import boto
         assert boto.Version >= BOTO_MIN_VERSION
 
-        from boto.s3.key import Key
-
         # This folds the null prefix and all null parts, which means that:
         #  //MyBucket/ and //MyBucket are equivalent.
         #  //MyBucket//My///My/Prefix/ and //MyBucket/My/Prefix are equivalent.
@@ -66,8 +64,6 @@
 
         self.scheme = parsed_url.scheme
 
-        self.key_class = Key
-
         if self.url_parts:
             self.key_prefix = '%s/' % '/'.join(self.url_parts)
         else:
@@ -75,6 +71,12 @@
 
         self.straight_url = duplicity.backend.strip_auth_from_url(parsed_url)
         self.parsed_url = parsed_url
+
+        # duplicity and boto.storage_uri() have different URI formats.
+        # boto uses scheme://bucket[/name] and specifies hostname on connect()
+        self.boto_uri_str = '://'.join((parsed_url.scheme[:2],
+                                        parsed_url.path.lstrip('/')))
+        self.storage_uri = boto.storage_uri(self.boto_uri_str)
         self.resetConnection()
 
     def resetConnection(self):
@@ -140,12 +142,13 @@
                            "(http://code.google.com/p/boto/)." % BOTO_MIN_VERSION,
                            log.ErrorCode.boto_lib_too_old)
 
-        if self.scheme == 's3+http':
-            # Use the default Amazon S3 host.
-            self.conn = S3Connection(is_secure=(not globals.s3_unencrypted_connection))
+        if not self.parsed_url.hostname:
+            # Use the default host.
+            self.conn = self.storage_uri.connect(
+                is_secure=(not globals.s3_unencrypted_connection))
         else:
             assert self.scheme == 's3'
-            self.conn = S3Connection(
+            self.conn = self.storage_uri.connect(
                 host=self.parsed_url.hostname,
                 is_secure=(not globals.s3_unencrypted_connection))
 
@@ -199,7 +202,7 @@
 
         if not remote_filename:
             remote_filename = source_path.get_filename()
-        key = self.key_class(self.bucket)
+        key = self.storage_uri.new_key()
         key.key = self.key_prefix + remote_filename
 
         for n in range(1, globals.num_retries+1):
@@ -236,7 +239,7 @@
         raise BackendException("Error uploading %s/%s" % (self.straight_url, remote_filename))
 
     def get(self, remote_filename, local_path):
-        key = self.key_class(self.bucket)
+        key = self.storage_uri.new_key()
         key.key = self.key_prefix + remote_filename
         for n in range(1, globals.num_retries+1):
             if n > 1:
@@ -326,5 +329,6 @@
             else:
                 return {'size': None}
 
+duplicity.backend.register_backend("gs", BotoBackend)
 duplicity.backend.register_backend("s3", BotoBackend)
 duplicity.backend.register_backend("s3+http", BotoBackend)
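
The boto_uri_str construction above is the subtle part of this file: duplicity keeps the bucket in the path component for these schemes (hence the //MyBucket comments earlier in the file), while boto.storage_uri() wants scheme://bucket[/name] and takes any explicit hostname via connect() instead. A rough illustration of the mapping, with hypothetical bucket and prefix:

def to_boto_uri(scheme, path):
    # 's3+http' and 's3' both fold to 's3'; 'gs' is already two
    # characters, so scheme[:2] leaves it untouched.
    return '://'.join((scheme[:2], path.lstrip('/')))

assert to_boto_uri('s3+http', '//my-bucket/prefix') == 's3://my-bucket/prefix'
assert to_boto_uri('gs', '//my-bucket/prefix') == 'gs://my-bucket/prefix'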

=== modified file 'testing/manual/backendtest.py'
--- testing/manual/backendtest.py	2011-11-17 15:59:54 +0000
+++ testing/manual/backendtest.py	2013-08-02 23:03:25 +0000
@@ -188,6 +188,19 @@
     password = config.ftp_password
 
 
+class gsModuleTest(unittest.TestCase, UnivTest):
+    """ Test the gs module backend """
+    def setUp(self):
+        assert not os.system("tar xzf testfiles.tar.gz > /dev/null 2>&1")
+
+    def tearDown(self):
+        assert not os.system("rm -rf testfiles tempdir temp2.tar")
+
+    my_test_id = "gs/boto"
+    url_string = config.gs_url
+    password = None
+
+
 class rsyncAbsPathTest(unittest.TestCase, UnivTest):
     """ Test the rsync abs path backend """
     def setUp(self):

=== modified file 'testing/manual/config.py.tmpl'
--- testing/manual/config.py.tmpl	2012-09-13 14:08:52 +0000
+++ testing/manual/config.py.tmpl	2013-08-02 23:03:25 +0000
@@ -65,6 +65,10 @@
 ftp_url = None
 ftp_password = None
 
+gs_url = None
+gs_access_key = None
+gs_secret_key = None
+
 rsync_abspath_url = None
 rsync_relpath_url = None
 rsync_module_url = None
@@ -108,6 +112,12 @@
 
     set_environ("FTP_PASSWORD", None)
     set_environ("PASSPHRASE", None)
+    if gs_access_key:
+        set_environ("GS_ACCESS_KEY_ID", gs_access_key)
+        set_environ("GS_SECRET_ACCESS_KEY", gs_secret_key)
+    else:
+        set_environ("GS_ACCESS_KEY_ID", None)
+        set_environ("GS_SECRET_ACCESS_KEY", None)
     if s3_access_key:
         set_environ("AWS_ACCESS_KEY_ID", s3_access_key)
         set_environ("AWS_SECRET_ACCESS_KEY", s3_secret_key)

=== modified file 'testing/tests/test_tarfile.py'
--- testing/tests/test_tarfile.py	2011-11-04 04:27:29 +0000
+++ testing/tests/test_tarfile.py	2013-08-02 23:03:25 +0000
@@ -193,6 +193,7 @@
         self.extract_and_compare_tarfile()
 
     def extract_and_compare_tarfile(self):
+        old_umask = os.umask(022)
         os.system("rm -r tempdir")
         assert not os.system("tar -xf temp2.tar")
 
@@ -224,6 +225,7 @@
         s = os.lstat("tempdir/symlink")
         assert stat.S_ISLNK(s.st_mode)
 
+        os.umask(old_umask)
 
 class Test_FObj(BaseTest):
     """Test for read operations via file-object.

