launchpad-reviewers team mailing list archive
-
launchpad-reviewers team
-
Mailing list archive
-
Message #00969
[Merge] lp:~lifeless/launchpad/memcache into lp:launchpad
Robert Collins has proposed merging lp:~lifeless/launchpad/memcache into lp:launchpad with lp:~lifeless/launchpad/bug-631884 as a prerequisite.
Requested reviews:
Launchpad code reviewers (launchpad-reviewers)
Allow disabling memcache via feature flags. This will permit some easy experiments where we have pages that appear to spend a lot of time in memcache, or serve as a workaround if/when we find a bug in a memcache expression: we can work around it immediately rather than only after merging a code change.
--
https://code.launchpad.net/~lifeless/launchpad/memcache/+merge/35220
Your team Launchpad code reviewers is requested to review the proposed merge of lp:~lifeless/launchpad/memcache into lp:launchpad.
=== modified file 'lib/lp/services/memcache/client.py'
--- lib/lp/services/memcache/client.py 2010-09-04 05:59:16 +0000
+++ lib/lp/services/memcache/client.py 2010-09-12 05:59:48 +0000
@@ -6,12 +6,14 @@
__metaclass__ = type
__all__ = []
+import logging
import re
from lazr.restful.utils import get_current_browser_request
import memcache
from canonical.config import config
+from lp.services import features
from lp.services.timeline.requesttimeline import get_request_timeline
@@ -32,7 +34,13 @@
timeline = get_request_timeline(request)
return timeline.start("memcache-%s" % suffix, key)
+ @property
+ def _enabled(self):
+ return features.getFeatureFlag('memcache') != 'disabled'
+
def get(self, key):
+ if not self._enabled:
+ return None
action = self.__get_timeline_action("get", key)
try:
return memcache.Client.get(self, key)
@@ -40,9 +48,16 @@
action.finish()
def set(self, key, value, time=0, min_compress_len=0):
+ if not self._enabled:
+ return None
action = self.__get_timeline_action("set", key)
try:
- return memcache.Client.set(self, key, value, time=time,
+ success = memcache.Client.set(self, key, value, time=time,
min_compress_len=min_compress_len)
+ if success:
+ logging.debug("Memcache set succeeded for %s", key)
+ else:
+ logging.warn("Memcache set failed for %s", key)
+ return success
finally:
action.finish()
=== modified file 'lib/lp/services/memcache/doc/tales-cache.txt'
--- lib/lp/services/memcache/doc/tales-cache.txt 2010-06-24 17:59:02 +0000
+++ lib/lp/services/memcache/doc/tales-cache.txt 2010-09-12 05:59:48 +0000
@@ -343,3 +343,36 @@
>>> ignore = config.pop('is_production')
+Disabling
+---------
+
+Memcache in templates can be disabled entirely by setting the memcache flag to
+'disabled'.
+
+ >>> from canonical.launchpad.webapp.servers import LaunchpadTestRequest
+ >>> from lp.services.features.model import FeatureFlag, getFeatureStore
+ >>> from lp.services.features.webapp import ScopesFromRequest
+ >>> from lp.services.features.flags import FeatureController
+ >>> from lp.services.features import per_thread
+ >>> ignore = getFeatureStore().add(FeatureFlag(
+ ... scope=u'default', flag=u'memcache', value=u'disabled',
+ ... priority=1))
+ >>> empty_request = LaunchpadTestRequest()
+ >>> per_thread.features = FeatureController(
+ ... ScopesFromRequest(empty_request).lookup)
+
+And now what cached before will not cache.
+
+ >>> template = TestPageTemplate(dedent("""\
+ ... <div tal:content="cache:public">
+ ... <span tal:content="param">placeholder</span>
+ ... </div>"""))
+
+ >>> print template(param='first')
+ <div>
+ <span>first</span>
+ </div>
+ >>> print template(param='second')
+ <div>
+ <span>second</span>
+ </div>
=== modified file 'lib/lp/services/memcache/tales.py'
--- lib/lp/services/memcache/tales.py 2010-08-20 20:31:18 +0000
+++ lib/lp/services/memcache/tales.py 2010-09-12 05:59:48 +0000
@@ -148,7 +148,9 @@
+ ''.join(chr(i) for i in range(ord(':')+1, 127)) + '_' * 129)
# We strip digits from our LPCONFIG when generating the key
- # to ensure that edge1 and edge4 share cache.
+ # to ensure that multiple appserver instances sharing a memcache instance
+ # can get hits from each other. For instance edge1 and edge4 are in this
+ # situation.
_lpconfig = config.instance_name.rstrip('0123456789')
def getKey(self, econtext):
@@ -278,11 +280,7 @@
rule = '%s [%s seconds]' % (self._memcache_expr, self._max_age)
value = "<!-- Cache hit: %s -->%s<!-- End cache hit: %s -->" % (
rule, value, rule)
- if getUtility(IMemcacheClient).set(
- self._key, value, self._max_age):
- logging.debug("Memcache set succeeded for %s", self._key)
- else:
- logging.warn("Memcache set failed for %s", self._key)
+ getUtility(IMemcacheClient).set(self._key, value, self._max_age)
def __repr__(self):
return "<MemcacheCallback %s %d>" % (self._key, self._max_age)
Follow ups