← Back to team overview

testtools-dev team mailing list archive

[Merge] lp:~lifeless/testtools/bug-1088693 into lp:testtools

 

Robert Collins has proposed merging lp:~lifeless/testtools/bug-1088693 into lp:testtools.

Requested reviews:
  testtools committers (testtools-committers)
Related bugs:
  Bug #1088693 in testtools: "cannot use content_from_stream with StringIO - does not seek to the start"
  https://bugs.launchpad.net/testtools/+bug/1088693

For more details, see:
https://code.launchpad.net/~lifeless/testtools/bug-1088693/+merge/139114

Fix a small bug to make content_from_stream easier to use.
-- 
https://code.launchpad.net/~lifeless/testtools/bug-1088693/+merge/139114
Your team testtools developers is subscribed to branch lp:testtools.
=== modified file 'NEWS'
--- NEWS	2012-10-25 14:26:11 +0000
+++ NEWS	2012-12-10 23:06:25 +0000
@@ -6,6 +6,12 @@
 NEXT
 ~~~~
 
+Improvements
+------------
+
+* ``content_from_file`` and ``content_from_stream`` now accept ``seek_offset``
+  and ``seek_whence`` parameters, allowing them to be used to grab less than
+  the full stream, or to be used with ``StringIO`` streams.
+  (Robert Collins, #1088693)
 
 0.9.21
 ~~~~~~

=== modified file 'testtools/content.py'
--- testtools/content.py	2012-10-18 15:29:07 +0000
+++ testtools/content.py	2012-12-10 23:06:25 +0000
@@ -33,12 +33,16 @@
 STDERR_LINE = '\nStderr:\n%s'
 
 
-def _iter_chunks(stream, chunk_size):
+def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
     """Read 'stream' in chunks of 'chunk_size'.
 
     :param stream: A file-like object to read from.
     :param chunk_size: The size of each read from 'stream'.
+    :param seek_offset: If non-None, seek before iterating.
+    :param seek_whence: Pass through to the seek call, if seeking.
     """
+    if seek_offset is not None:
+        stream.seek(seek_offset, seek_whence)
     chunk = stream.read(chunk_size)
     while chunk:
         yield chunk
@@ -215,7 +219,7 @@
 
 
 def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
-                      buffer_now=False):
+                      buffer_now=False, seek_offset=None, seek_whence=0):
     """Create a `Content` object from a file on disk.
 
     Note that unless 'buffer_now' is explicitly passed in as True, the file
@@ -228,6 +232,8 @@
         Defaults to ``DEFAULT_CHUNK_SIZE``.
     :param buffer_now: If True, read the file from disk now and keep it in
         memory. Otherwise, only read when the content is serialized.
+    :param seek_offset: If non-None, seek within the stream before reading it.
+    :param seek_whence: If supplied, pass to stream.seek() when seeking.
     """
     if content_type is None:
         content_type = UTF8_TEXT
@@ -236,14 +242,15 @@
         # We drop older python support we can make this use a context manager
         # for maximum simplicity.
         stream = open(path, 'rb')
-        for chunk in _iter_chunks(stream, chunk_size):
+        for chunk in _iter_chunks(stream, chunk_size, seek_offset, seek_whence):
             yield chunk
         stream.close()
     return content_from_reader(reader, content_type, buffer_now)
 
 
 def content_from_stream(stream, content_type=None,
-                        chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False):
+                        chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
+                        seek_offset=None, seek_whence=0):
     """Create a `Content` object from a file-like stream.
 
     Note that the stream will only be read from when ``iter_bytes`` is
@@ -257,10 +264,12 @@
         Defaults to ``DEFAULT_CHUNK_SIZE``.
     :param buffer_now: If True, reads from the stream right now. Otherwise,
         only reads when the content is serialized. Defaults to False.
+    :param seek_offset: If non-None, seek within the stream before reading it.
+    :param seek_whence: If supplied, pass to stream.seek() when seeking.
     """
     if content_type is None:
         content_type = UTF8_TEXT
-    reader = lambda: _iter_chunks(stream, chunk_size)
+    reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence)
     return content_from_reader(reader, content_type, buffer_now)
 
 

=== modified file 'testtools/tests/test_content.py'
--- testtools/tests/test_content.py	2012-10-18 15:29:07 +0000
+++ testtools/tests/test_content.py	2012-12-10 23:06:25 +0000
@@ -125,6 +125,27 @@
         self.assertThat(
             ''.join(content.iter_text()), Equals('some data'))
 
+    def test_from_file_with_simple_seek(self):
+        f = tempfile.NamedTemporaryFile()
+        f.write('some data')
+        f.flush()
+        self.addCleanup(f.close)
+        content = content_from_file(
+            f.name, UTF8_TEXT, chunk_size=50, seek_offset=5)
+        self.assertThat(
+            list(content.iter_bytes()), Equals(['data']))
+
+    def test_from_file_with_whence_seek(self):
+        f = tempfile.NamedTemporaryFile()
+        f.write('some data')
+        f.flush()
+        self.addCleanup(f.close)
+        content = content_from_file(
+            f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+        self.assertThat(
+            list(content.iter_bytes()), Equals(['data']))
+
+
     def test_from_stream(self):
         data = StringIO('some data')
         content = content_from_stream(data, UTF8_TEXT, chunk_size=2)
@@ -148,6 +169,20 @@
         self.assertThat(
             ''.join(content.iter_text()), Equals('some data'))
 
+    def test_from_stream_with_simple_seek(self):
+        data = StringIO('some data')
+        content = content_from_stream(
+            data, UTF8_TEXT, chunk_size=50, seek_offset=5)
+        self.assertThat(
+            list(content.iter_bytes()), Equals(['data']))
+
+    def test_from_stream_with_whence_seek(self):
+        data = StringIO('some data')
+        content = content_from_stream(
+            data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+        self.assertThat(
+            list(content.iter_bytes()), Equals(['data']))
+
     def test_from_text(self):
         data = _u("some data")
         expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])


Follow ups