
calibre-devs team mailing list archive

[Merge] lp:~miurahr/calibre/experimental-recipes into lp:calibre

 

Hiroshi Miura has proposed merging lp:~miurahr/calibre/experimental-recipes into lp:calibre.

Requested reviews:
  Kovid Goyal (kovid)


Update to the Japanese recipes, including:

  - add a Yomiuri Online news recipe (a structural sketch follows this list)
  - fix an encoding error in the Jiji Press recipe (switch from utf-8 to euc_jisx0213)
  - several minor fixes
  - add news source icons
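
The Yomiuri recipe itself is not reproduced in the diff excerpt below, so purely as an illustration of the recipe structure used throughout this branch, a minimal new recipe is shaped roughly like this (the class name, feed title, feed URL and encoding are placeholders, not the real values):

    from calibre.web.feeds.news import BasicNewsRecipe

    class YomiuriOnlineSketch(BasicNewsRecipe):
        # Placeholder values for illustration only.
        title          = u'Yomiuri Online (sketch)'
        oldest_article = 2
        max_articles_per_feed = 50
        language       = 'ja'
        encoding       = 'utf-8'
        # Hypothetical feed title/URL; the actual recipe defines its own feed list.
        feeds          = [(u'news', u'http://www.yomiuri.co.jp/rss/example.rdf')]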

-- 
https://code.launchpad.net/~miurahr/calibre/experimental-recipes/+merge/42048
Your team calibre developers is subscribed to branch lp:~miurahr/calibre/experimental-recipes.
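
Several of the CNET recipes in this diff override parse_feeds() to drop feed entries pointing at pheedo.jp (apparently ad/redirect items injected into the RSS feeds). Condensed into a standalone sketch, with the recipe attributes reduced to placeholders, the idea is:

    import re
    from calibre.web.feeds.news import BasicNewsRecipe

    class PheedoFilterSketch(BasicNewsRecipe):
        # Illustrative placeholders; the real recipes declare their own
        # title, feeds, encoding and tag-cleanup rules as shown below.
        title = u'pheedo filter sketch'
        feeds = []

        def parse_feeds(self):
            # Let BasicNewsRecipe fetch and parse the configured feeds first.
            feeds = BasicNewsRecipe.parse_feeds(self)
            for feed in feeds:
                # Drop pheedo.jp ad/redirect entries in place.
                for article in list(feed.articles):
                    if re.search(r'pheedo\.jp', article.url):
                        feed.articles.remove(article)
            return feeds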
=== added file 'resources/images/news/mainichi.png'
Binary files resources/images/news/mainichi.png	1970-01-01 00:00:00 +0000 and resources/images/news/mainichi.png	2010-11-28 15:36:15 +0000 differ
=== added file 'resources/images/news/mainichi_it_news.png'
Binary files resources/images/news/mainichi_it_news.png	1970-01-01 00:00:00 +0000 and resources/images/news/mainichi_it_news.png	2010-11-28 15:36:15 +0000 differ
=== added file 'resources/images/news/nikkei_sub.png'
Binary files resources/images/news/nikkei_sub.png	1970-01-01 00:00:00 +0000 and resources/images/news/nikkei_sub.png	2010-11-28 15:36:15 +0000 differ
=== renamed file 'resources/images/news/nikkei_sub_industory.png' => 'resources/images/news/nikkei_sub_industry.png'
=== modified file 'resources/recipes/cnetjapan.recipe'
--- resources/recipes/cnetjapan.recipe	2010-11-23 16:04:13 +0000
+++ resources/recipes/cnetjapan.recipe	2010-11-28 15:36:15 +0000
@@ -7,7 +7,9 @@
     max_articles_per_feed = 30
     __author__  = 'Hiroshi Miura'
 
-    feeds          = [(u'cnet rss', u'http://feeds.japan.cnet.com/cnet/rss')]
+    feeds          = [(u'CNet News', u'http://feed.japan.cnet.com/rss/index.rdf'),
+                      (u'CNet Blog', u'http://feed.japan.cnet.com/rss/blog/index.rdf')
+                        ]
     language       = 'ja'
     encoding       = 'Shift_JIS'
     remove_javascript = True
@@ -21,12 +23,29 @@
         lambda match: '<!-- removed -->'),
         ]
 
-    remove_tags_before = dict(name="h2")
+    remove_tags_before = dict(id="contents_l")
     remove_tags = [
                    {'class':"social_bkm_share"},
                    {'class':"social_bkm_print"},
                    {'class':"block20 clearfix"},
                    dict(name="div",attrs={'id':'bookreview'}),
+                   {'class':"tag_left_ttl"},
+                   {'class':"tag_right"}
                     ]
     remove_tags_after = {'class':"block20"}
 
+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'pheedo.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds

=== added file 'resources/recipes/cnetjapan_digital.recipe'
--- resources/recipes/cnetjapan_digital.recipe	1970-01-01 00:00:00 +0000
+++ resources/recipes/cnetjapan_digital.recipe	2010-11-28 15:36:15 +0000
@@ -0,0 +1,49 @@
+import re
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class CNetJapanDigital(BasicNewsRecipe):
+    title          = u'CNET Japan Digital'
+    oldest_article = 3
+    max_articles_per_feed = 30
+    __author__  = 'Hiroshi Miura'
+
+    feeds          = [(u'CNet digital',u'http://feed.japan.cnet.com/rss/digital/index.rdf') ]
+    language       = 'ja'
+    encoding       = 'Shift_JIS'
+    remove_javascript = True
+
+    preprocess_regexps = [
+       (re.compile(ur'<!--\u25B2contents_left END\u25B2-->.*</body>', re.DOTALL|re.IGNORECASE|re.UNICODE),
+        lambda match: '</body>'),
+       (re.compile(r'<!--AD_ELU_HEADER-->.*</body>', re.DOTALL|re.IGNORECASE),
+        lambda match: '</body>'),
+       (re.compile(ur'<!-- \u25B2\u95A2\u9023\u30BF\u30B0\u25B2 -->.*<!-- \u25B2ZDNet\u25B2 -->', re.UNICODE),
+        lambda match: '<!-- removed -->'),
+        ]
+
+    remove_tags_before = dict(id="contents_l")
+    remove_tags = [
+                   {'class':"social_bkm_share"},
+                   {'class':"social_bkm_print"},
+                   {'class':"block20 clearfix"},
+                   dict(name="div",attrs={'id':'bookreview'}),
+                   {'class':"tag_left_ttl"},
+                   {'class':"tag_right"}
+                    ]
+    remove_tags_after = {'class':"block20"}
+
+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'pheedo.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds

=== added file 'resources/recipes/cnetjapan_release.recipe'
--- resources/recipes/cnetjapan_release.recipe	1970-01-01 00:00:00 +0000
+++ resources/recipes/cnetjapan_release.recipe	2010-11-28 15:36:15 +0000
@@ -0,0 +1,48 @@
+import re
+from calibre.web.feeds.news import BasicNewsRecipe
+
+class CNetJapanRelease(BasicNewsRecipe):
+    title          = u'CNET Japan release'
+    oldest_article = 3
+    max_articles_per_feed = 30
+    __author__  = 'Hiroshi Miura'
+
+    feeds          = [(u'CNet Release', u'http://feed.japan.cnet.com/rss/release/index.rdf') ]
+    language       = 'ja'
+    encoding       = 'Shift_JIS'
+    remove_javascript = True
+
+    preprocess_regexps = [
+       (re.compile(ur'<!--\u25B2contents_left END\u25B2-->.*</body>', re.DOTALL|re.IGNORECASE|re.UNICODE),
+        lambda match: '</body>'),
+       (re.compile(r'<!--AD_ELU_HEADER-->.*</body>', re.DOTALL|re.IGNORECASE),
+        lambda match: '</body>'),
+       (re.compile(ur'<!-- \u25B2\u95A2\u9023\u30BF\u30B0\u25B2 -->.*<!-- \u25B2ZDNet\u25B2 -->', re.UNICODE),
+        lambda match: '<!-- removed -->'),
+        ]
+
+    remove_tags_before = dict(id="contents_l")
+    remove_tags = [
+                   {'class':"social_bkm_share"},
+                   {'class':"social_bkm_print"},
+                   {'class':"block20 clearfix"},
+                   dict(name="div",attrs={'id':'bookreview'}),
+                   {'class':"tag_left_ttl"}
+                    ]
+    remove_tags_after = {'class':"block20"}
+
+    def parse_feeds(self):
+
+        feeds = BasicNewsRecipe.parse_feeds(self)
+
+        for curfeed in feeds:
+            delList = []
+            for a,curarticle in enumerate(curfeed.articles):
+                if re.search(r'pheedo.jp', curarticle.url):
+                    delList.append(curarticle)
+            if len(delList)>0:
+                for d in delList:
+                    index = curfeed.articles.index(d)
+                    curfeed.articles[index:index+1] = []
+
+        return feeds

=== modified file 'resources/recipes/jijinews.recipe'
--- resources/recipes/jijinews.recipe	2010-11-23 16:04:13 +0000
+++ resources/recipes/jijinews.recipe	2010-11-28 15:36:15 +0000
@@ -14,13 +14,20 @@
     description    = 'World News from Jiji Press'
     publisher      = 'Jiji Press Ltd.'
     category       = 'news'
-    encoding       = 'utf-8'
     oldest_article = 6
     max_articles_per_feed = 100
+    encoding       = 'euc_jisx0213'
     language       = 'ja'
-    cover_url       = 'http://www.jiji.com/img/top_header_logo2.gif'
-    masthead_url    = 'http://jen.jiji.com/images/logo_jijipress.gif'
+    masthead_url   = 'http://jen.jiji.com/images/logo_jijipress.gif'
+    top_url        = 'http://www.jiji.com/'
 
     feeds          = [(u'\u30cb\u30e5\u30fc\u30b9', u'http://www.jiji.com/rss/ranking.rdf')]
     remove_tags_after = dict(id="ad_google")
 
+    def get_cover_url(self):
+        cover_url       = 'http://www.jiji.com/img/top_header_logo2.gif'
+        soup = self.index_to_soup(self.top_url)
+        cover_item = soup.find('div', attrs={'class':'top-pad-photos'})
+        if cover_item:
+            cover_url = self.top_url + cover_item.img['src']
+        return cover_url

=== modified file 'resources/recipes/msnsankei.recipe'
--- resources/recipes/msnsankei.recipe	2010-11-23 16:04:13 +0000
+++ resources/recipes/msnsankei.recipe	2010-11-28 15:36:15 +0000
@@ -1,4 +1,3 @@
-#!/usr/bin/env  python
 
 __license__   = 'GPL v3'
 __copyright__ = '2010, Hiroshi Miura <miurahr@xxxxxxxxx>'
@@ -16,9 +15,13 @@
     max_articles_per_feed = 100
     encoding       = 'Shift_JIS'
     language       = 'ja'
+    cover_url       = 'http://sankei.jp.msn.com/images/common/sankeShinbunLogo.jpg'
+    masthead_url = 'http://sankei.jp.msn.com/images/common/sankeiNewsLogo.gif'
 
     feeds          = [(u'\u65b0\u5546\u54c1', u'http://sankei.jp.msn.com/rss/news/release.xml')]
 
     remove_tags_before = dict(id="__r_article_title__")
     remove_tags_after  = dict(id="ajax_release_news")
-    remove_tags = [{'class':"parent chromeCustom6G"}]
+    remove_tags = [{'class':"parent chromeCustom6G"},
+                              dict(id="RelatedImg")
+                            ]

=== modified file 'resources/recipes/nikkei_sub_main.recipe'
--- resources/recipes/nikkei_sub_main.recipe	2010-11-23 16:04:13 +0000
+++ resources/recipes/nikkei_sub_main.recipe	2010-11-28 15:36:15 +0000
@@ -30,6 +30,9 @@
                        {'class':"JSID_basePageMove JSID_baseAsyncSubmit cmn-form_area JSID_optForm_utoken"},
                        {'class':"cmn-article_keyword cmn-clearfix"},
                        {'class':"cmn-print_headline cmn-clearfix"},
+                       {'class':"cmn-article_list"},
+                       {'class':"cmn-dashedline"},
+                       {'class':"cmn-hide"},
                          ]
     remove_tags_after = {'class':"cmn-pr_list"}
 

