#!/usr/bin/python
"""Ultra-liberal feed parser

Visit http://diveintomark.org/projects/feed_parser/ for the latest version

This parser has been modified by Juri Pakaste.
Updates by Jan Alonzo.

Handles RSS 0.9x, RSS 1.0, RSS 2.0, Pie/Atom/Echo feeds

RSS 0.9x/common elements:
- title, link, guid, description, webMaster, managingEditor, language,
  copyright, lastBuildDate, pubDate

Additional RSS 1.0/2.0 elements:
- dc:rights, dc:language, dc:creator, dc:date, dc:subject,
  content:encoded, admin:generatorAgent, admin:errorReportsTo

Additional Pie/Atom/Echo elements:
- subtitle, created, issued, modified, summary, id, content

Things it handles that choke other parsers:
- bastard combinations of RSS 0.9x and RSS 1.0
- illegal XML characters
- naked and/or invalid HTML in description
- content:encoded in item element
- guid in item element
- fullitem in item element
- non-standard namespaces
- inline XML in content (Pie/Atom/Echo)
- multiple content items per entry (Pie/Atom/Echo)

Requires Python 2.2 or later
"""

__version__ = "2.5.3"
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__copyright__ = "Copyright 2002-3, Mark Pilgrim"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
                    "John Beimler <http://john.beimler.org/>"]
__license__ = "Python"
__history__ = """
1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
  added Simon Fell's test suite
1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
2.0 - 10/19/2002
  JD - use inchannel to watch out for image and textinput elements which can
  also contain title, link, and description elements
  JD - check for isPermaLink="false" attribute on guid elements
  JD - replaced openAnything with open_resource supporting ETag and
  If-Modified-Since request headers
  JD - parse now accepts etag, modified, agent, and referrer optional
  arguments
  JD - modified parse to return a dictionary instead of a tuple so that any
  etag or modified information can be returned and cached by the caller
2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
  because of etag/modified, return the old etag/modified to the caller to
  indicate why nothing is being returned
2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise it's
  useless.  Fixes the problem JD was addressing by adding it.
2.1 - 11/14/2002 - MAP - added gzip support
2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
  start_admingeneratoragent is an example of how to handle elements with
  only attributes, no content.
2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
  also, make sure we send the User-Agent even if urllib2 isn't available.
  Match any variation of backend.userland.com/rss namespace.
2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
  snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
  project name
2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
  removed unnecessary urllib code -- urllib2 should always be available anyway;
  return actual url, status, and full HTTP headers (as result['url'],
  result['status'], and result['headers']) if parsing a remote feed over HTTP --
  this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
  added the latest namespace-of-the-week for RSS 2.0
2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
  User-Agent (otherwise urllib2 sends two, which confuses some servers)
2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
  inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
  textInput, and also to return the character encoding (if specified)
"""

cvs_id = "$Id$"

try:
    import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py
    timeoutsocket.setDefaultSocketTimeout(10)
except ImportError:
    pass
import cgi, re, sgmllib, string, StringIO, sys, gzip, urllib2
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')

USER_AGENT = "UltraLiberalFeedParser/%s +http://diveintomark.org/projects/feed_parser/" % __version__

def decodeEntities(data):
    # Protect literal &lt;/&gt; as numeric references first, so that
    # double-encoded markup like "&amp;lt;" decodes to "&lt;" instead of
    # collapsing all the way to "<"; they are restored at the end.
    data = data or ''
    data = data.replace('&lt;', '&#60;')
    data = data.replace('&gt;', '&#62;')
    data = data.replace('&quot;', '"')
    data = data.replace('&apos;', "'")
    data = data.replace('&amp;', '&')
    data = data.replace('&#60;', '<')
    data = data.replace('&#62;', '>')
    return data
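
# For example (illustrative values only):
#   decodeEntities('&lt;p&gt;')  ->  '<p>'
#   decodeEntities('&amp;lt;')   ->  '&lt;'  (double-encoded markup survives)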

class FeedParser(sgmllib.SGMLParser):
    namespaces = {"http://backend.userland.com/rss": "",
                  "http://backend.userland.com/rss2": "",
                  "http://my.netscape.com/rdf/simple/0.9/": "",
                  "http://blogs.law.harvard.edu/tech/rss": "",
                  "http://purl.org/rss/1.0/": "",
                  "http://example.com/newformat#": "",
                  "http://example.com/necho": "",
                  "http://purl.org/echo/": "",
                  "uri/of/echo/namespace#": "",
                  "http://purl.org/pie/": "",
                  "http://purl.org/rss/1.0/modules/textinput/": "ti",
                  "http://purl.org/rss/1.0/modules/company/": "co",
                  "http://purl.org/rss/1.0/modules/syndication/": "sy",
                  "http://purl.org/dc/elements/1.1/": "dc",
                  "http://webns.net/mvcb/": "admin",
                  "http://www.w3.org/1999/xhtml": "xhtml",
                  "http://freshmeat.net/rss/fm/": "fm",
                  "http://prismstandard.org/namespaces/1.2/basic/": "prism",
                  "http://backend.userland.com/blogChannelModule/": "blogChannel",
                  "http://backend.userland.com/creativeCommonsRssModule/": "creativeCommons"}

    def reset(self):
        self.channel = {}
        self.items = []
        self.elementstack = []
        self.inchannel = 0
        self.initem = 0
        self.incontent = 0
        self.intextinput = 0
        self.inimage = 0
        self.contentmode = None
        self.contenttype = None
        self.contentlang = None
        self.namespacemap = {}
        sgmllib.SGMLParser.reset(self)

    def push(self, element, expectingText):
        self.elementstack.append([element, expectingText, []])

    def pop(self, element):
        if not self.elementstack: return
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        if not expectingText: return
        output = "".join(pieces)
        output = decodeEntities(output)
        if self.incontent and self.initem:
            if not self.items[-1].has_key(element):
                self.items[-1][element] = []
            self.items[-1][element].append({"language":self.contentlang, "type":self.contenttype, "value":output})
        elif self.initem:
            self.items[-1][element] = output
        elif self.inchannel and (not self.intextinput) and (not self.inimage):
            self.channel[element] = output

    def _addNamespaces(self, attrs):
        for prefix, value in attrs:
            if not prefix.startswith("xmlns:"): continue
            prefix = prefix[6:]
            if value.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                value = 'http://backend.userland.com/rss'
            if self.namespaces.has_key(value):
                self.namespacemap[prefix] = self.namespaces[value]

    def _mapToStandardPrefix(self, name):
        colonpos = name.find(':')
        if colonpos <> -1:
            prefix = name[:colonpos]
            suffix = name[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            name = prefix + ':' + suffix
        return name

    def _getAttribute(self, attrs, name):
        value = [v for k, v in attrs if self._mapToStandardPrefix(k) == name]
        if value:
            value = value[0]
        else:
            value = None
        return value
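
    # Namespace-mapping sketch (hypothetical prefix "foo"): a feed declaring
    # xmlns:foo="http://purl.org/dc/elements/1.1/" yields
    # namespacemap['foo'] = 'dc', so a <foo:creator> element is dispatched
    # by unknown_starttag to start_dc_creator, exactly like <dc:creator>.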

    def start_channel(self, attrs):
        self.push('channel', 0)
        self.inchannel = 1

    def end_channel(self):
        self.pop('channel')
        self.inchannel = 0

    def start_image(self, attrs):
        self.inimage = 1

    def end_image(self):
        self.inimage = 0

    def start_textinput(self, attrs):
        self.intextinput = 1

    def end_textinput(self):
        self.intextinput = 0

    def start_item(self, attrs):
        self.items.append({})
        self.push('item', 0)
        self.initem = 1

    def end_item(self):
        self.pop('item')
        self.initem = 0

    def start_dc_language(self, attrs):
        self.push('language', 1)
    start_language = start_dc_language

    def end_dc_language(self):
        self.pop('language')
    end_language = end_dc_language

    def start_dc_creator(self, attrs):
        self.push('creator', 1)
    start_managingeditor = start_dc_creator
    start_webmaster = start_dc_creator

    def end_dc_creator(self):
        self.pop('creator')
    end_managingeditor = end_dc_creator
    end_webmaster = end_dc_creator

    def start_dc_rights(self, attrs):
        self.push('rights', 1)
    start_copyright = start_dc_rights

    def end_dc_rights(self):
        self.pop('rights')
    end_copyright = end_dc_rights

    def start_dc_date(self, attrs):
        self.push('date', 1)
    start_lastbuilddate = start_dc_date
    start_pubdate = start_dc_date

    def end_dc_date(self):
        self.pop('date')
    end_lastbuilddate = end_dc_date
    end_pubdate = end_dc_date

    def start_dc_subject(self, attrs):
        self.push('category', 1)

    def end_dc_subject(self):
        self.pop('category')

    def start_source(self, attrs):
        self.push('source', 1)
        for attr, value in attrs:
            if attr == 'url':
                self.items[-1]['source_url'] = value

    def end_source(self):
        self.pop('source')

    def start_link(self, attrs):
        self.push('link', self.inchannel or self.initem)

    def end_link(self):
        self.pop('link')

    def start_guid(self, attrs):
        self.guidislink = ('ispermalink', 'false') not in attrs
        self.push('guid', 1)

    def end_guid(self):
        self.pop('guid')
        if self.guidislink:
            if not self.items[-1].has_key('link'):
                # guid acts as link, but only if "ispermalink" is not present or is "true",
                # and only if the item doesn't already have a link element
                self.items[-1]['link'] = self.items[-1]['guid']
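
    # Behavior sketch for the guid handling above (hypothetical item values):
    #   <guid>http://example.com/1</guid> with no <link> -> the item's 'link'
    #       is set to the guid value as well
    #   <guid isPermaLink="false">tag:example.com,2003:1</guid> -> 'guid' only
    #   <guid> plus an explicit <link> -> both returned as-is (see 2.3.1 above)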

    def start_title(self, attrs):
        self.push('title', self.inchannel or self.initem)

    def start_description(self, attrs):
        self.push('description', self.inchannel or self.initem)

    def start_content_encoded(self, attrs):
        self.push('content_encoded', 1)
    start_fullitem = start_content_encoded

    def end_content_encoded(self):
        self.pop('content_encoded')
    end_fullitem = end_content_encoded

    def start_admin_generatoragent(self, attrs):
        self.push('generator', 1)
        value = self._getAttribute(attrs, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')

    def start_feed(self, attrs):
        self.inchannel = 1

    def end_feed(self):
        self.inchannel = 0

    def start_entry(self, attrs):
        self.items.append({})
        self.push('item', 0)
        self.initem = 1

    def end_entry(self):
        self.pop('item')
        self.initem = 0

    def start_subtitle(self, attrs):
        self.push('subtitle', 1)

    def end_subtitle(self):
        self.pop('subtitle')

    def start_summary(self, attrs):
        self.push('summary', 1)

    def end_summary(self):
        self.pop('summary')

    def start_modified(self, attrs):
        self.push('modified', 1)

    def end_modified(self):
        self.pop('modified')

    def start_created(self, attrs):
        self.push('created', 1)

    def end_created(self):
        self.pop('created')

    def start_issued(self, attrs):
        self.push('issued', 1)

    def end_issued(self):
        self.pop('issued')

    def start_id(self, attrs):
        self.push('id', 1)

    def end_id(self):
        self.pop('id')

    # Creative Commons Licenses (RSS)
    def start_creativecommons_license(self, attrs):
        self.push('license', 1)

    def end_creativecommons_license(self):
        self.pop('license')

    # blogChannel
    def start_blogchannel_blogroll(self, attrs):
        self.push('blogRoll', 1)

    def end_blogchannel_blogroll(self):
        self.pop('blogRoll')

    def start_blogchannel_mysubscriptions(self, attrs):
        self.push('mySubscriptions', 1)

    def end_blogchannel_mysubscriptions(self):
        self.pop('mySubscriptions')

    def start_blogchannel_blink(self, attrs):
        self.push('blink', 1)

    def end_blogchannel_blink(self):
        self.pop('blink')

    # freshmeat
    def start_fm_license(self, attrs):
        self.push('license', 1)

    def end_fm_license(self):
        self.pop('license')

    def start_fm_changes(self, attrs):
        self.push('changes', 1)

    def end_fm_changes(self):
        self.pop('changes')

    # PRISM
    def start_prism_publicationname(self, attrs):
        self.push('publicationName', 1)

    def end_prism_publicationname(self):
        self.pop('publicationName')

    def start_prism_volume(self, attrs):
        self.push('volume', 1)

    def end_prism_volume(self):
        self.pop('volume')

    def start_prism_number(self, attrs):
        self.push('number', 1)

    def end_prism_number(self):
        self.pop('number')

    def start_prism_section(self, attrs):
        self.push('section', 1)

    def end_prism_section(self):
        self.pop('section')

    def start_prism_startingpage(self, attrs):
        self.push('startingPage', 1)

    def end_prism_startingpage(self):
        self.pop('startingPage')

    def start_content(self, attrs):
        self.incontent = 1
        if ('mode', 'escaped') in attrs:
            self.contentmode = 'escaped'
        elif ('mode', 'base64') in attrs:
            self.contentmode = 'base64'
        else:
            self.contentmode = 'xml'
        mimetype = [v for k, v in attrs if k=='type']
        if mimetype:
            self.contenttype = mimetype[0]
        xmllang = [v for k, v in attrs if k=='xml:lang']
        if xmllang:
            self.contentlang = xmllang[0]
        self.push('content', 1)

    def end_content(self):
        self.pop('content')
        self.incontent = 0
        self.contentmode = None
        self.contenttype = None
        self.contentlang = None

    def start_body(self, attrs):
        self.incontent = 1
        self.contentmode = 'xml'
        self.contenttype = 'application/xhtml+xml'
        xmllang = [v for k, v in attrs if k=='xml:lang']
        if xmllang:
            self.contentlang = xmllang[0]
        self.push('content', 1)

    start_div = start_body
    start_xhtml_body = start_body
    start_xhtml_div = start_body
    end_body = end_content
    end_div = end_content
    end_xhtml_body = end_content
    end_xhtml_div = end_content

    def unknown_starttag(self, tag, attrs):
        if self.incontent and self.contentmode == 'xml':
            self.handle_data("<%s%s>" % (tag, "".join([' %s="%s"' % t for t in attrs])))
            return
        self._addNamespaces(attrs)
        colonpos = tag.find(':')
        if colonpos <> -1:
            prefix = tag[:colonpos]
            suffix = tag[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            if prefix:
                prefix = prefix + '_'
            methodname = 'start_' + prefix + suffix
            try:
                method = getattr(self, methodname)
                return method(attrs)
            except AttributeError:
                return self.push(prefix + suffix, 0)
        return self.push(tag, 0)

    def unknown_endtag(self, tag):
        if self.incontent and self.contentmode == 'xml':
            self.handle_data("</%s>" % tag)
            return
        colonpos = tag.find(':')
        if colonpos <> -1:
            prefix = tag[:colonpos]
            suffix = tag[colonpos+1:]
            prefix = self.namespacemap.get(prefix, prefix)
            if prefix:
                prefix = prefix + '_'
            methodname = 'end_' + prefix + suffix
            try:
                method = getattr(self, methodname)
                return method()
            except AttributeError:
                return self.pop(prefix + suffix)
        return self.pop(tag)

    def handle_charref(self, ref):
        # called for each character reference, e.g. for "&#160;", ref will be "160"
        # Reconstruct the original character reference.
        if not self.elementstack: return
        text = "&#%s;" % ref
        if self.incontent and self.contentmode == 'xml':
            text = cgi.escape(text)
        self.elementstack[-1][2].append(text)

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for "&copy;", ref will be "copy"
        # Reconstruct the original entity reference.
        if not self.elementstack: return
        text = "&%s;" % ref
        if self.incontent and self.contentmode == 'xml':
            text = cgi.escape(text)
        self.elementstack[-1][2].append(text)

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        if not self.elementstack: return
        if self.incontent and self.contentmode == 'xml':
            text = cgi.escape(text)
        self.elementstack[-1][2].append(text)

    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        pass

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        pass

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        pass

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return string.lower(name), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token")

    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1: k = len(self.rawdata)
            self.handle_data(cgi.escape(self.rawdata[i+9:k]))
            return k+3
        return sgmllib.SGMLParser.parse_declaration(self, i)
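
# CDATA sketch: given <description><![CDATA[<b>hi</b>]]></description>,
# parse_declaration above escapes the block to "&lt;b&gt;hi&lt;/b&gt;" before
# handing it to handle_data; pop() later reverses this with decodeEntities.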

def get_etag(resource):
    """
    Get the ETag associated with a response returned from a call to
    open_resource().

    If the resource was not returned from an HTTP server or the server did
    not specify an ETag for the resource, this will return None.
    """

    if hasattr(resource, "info"):
        return resource.info().getheader("ETag")
    return None

def get_modified(resource):
    """
    Get the Last-Modified timestamp for a response returned from a call to
    open_resource().

    If the resource was not returned from an HTTP server or the server did
    not specify a Last-Modified timestamp, this function will return None.
    Otherwise, it returns a tuple of 9 integers as returned by gmtime() in
    the standard Python time module.
    """

    if hasattr(resource, "info"):
        last_modified = resource.info().getheader("Last-Modified")
        if last_modified:
            return parse_http_date(last_modified)
    return None

short_weekdays = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
long_weekdays = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

def format_http_date(date):
    """
    Formats a tuple of 9 integers into an RFC 1123-compliant timestamp as
    required in RFC 2616. We don't use time.strftime() since the %a and %b
    directives can be affected by the current locale (HTTP dates have to be
    in English). The date MUST be in GMT (Greenwich Mean Time).
    """

    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (short_weekdays[date[6]], date[2], months[date[1] - 1], date[0], date[3], date[4], date[5])

rfc1123_match = re.compile(r"(?P<weekday>[A-Z][a-z]{2}), (?P<day>\d{2}) (?P<month>[A-Z][a-z]{2}) (?P<year>\d{4}) (?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}) GMT").match
rfc850_match = re.compile(r"(?P<weekday>[A-Z][a-z]+), (?P<day>\d{2})-(?P<month>[A-Z][a-z]{2})-(?P<year>\d{2}) (?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}) GMT").match
asctime_match = re.compile(r"(?P<weekday>[A-Z][a-z]{2}) (?P<month>[A-Z][a-z]{2}) ?(?P<day>\d\d?) (?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}) (?P<year>\d{4})").match

def parse_http_date(date):
    """
    Parses any of the three HTTP date formats into a tuple of 9 integers as
    returned by time.gmtime(). This should not use time.strptime() since
    that function is not available on all platforms and could also be
    affected by the current locale.
    """

    date = str(date)
    year = 0
    weekdays = short_weekdays

    m = rfc1123_match(date)
    if not m:
        m = rfc850_match(date)
        if m:
            year = 1900
            weekdays = long_weekdays
        else:
            m = asctime_match(date)
            if not m:
                return None

    try:
        year = year + int(m.group("year"))
        month = months.index(m.group("month")) + 1
        day = int(m.group("day"))
        hour = int(m.group("hour"))
        minute = int(m.group("minute"))
        second = int(m.group("second"))
        weekday = weekdays.index(m.group("weekday"))
        a = int((14 - month) / 12)
        julian_day = (day - 32045 + int(((153 * (month + (12 * a) - 3)) + 2) / 5) + int((146097 * (year + 4800 - a)) / 400)) - (int((146097 * (year + 4799)) / 400) - 31738) + 1
        daylight_savings_flag = 0
        return (year, month, day, hour, minute, second, weekday, julian_day, daylight_savings_flag)
    except:
        # the month or weekday lookup probably failed indicating an invalid timestamp
        return None
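
# The inverse of format_http_date, e.g.:
#   parse_http_date('Wed, 06 Aug 2003 12:00:00 GMT')   # RFC 1123
#   parse_http_date('Wed Aug  6 12:00:00 2003')        # asctime
#   both -> (2003, 8, 6, 12, 0, 0, 2, 218, 0)
# RFC 850 dates ('Wednesday, 06-Aug-03 ...') are matched as well, but note
# that their two-digit year is interpreted as 1900 + yy.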

class FeedURLHandler(urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    def http_error_default(self, req, fp, code, msg, headers):
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        from urllib import addinfourl
        infourl = addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl
        # raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)

    def http_error_302(self, req, fp, code, msg, headers):
        infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        infourl.status = code
        return infourl

    http_error_300 = http_error_302
    http_error_307 = http_error_302

def open_resource(source, etag=None, modified=None, agent=None, referrer=None):
    """
    URI, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner. Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.
    """

    if hasattr(source, "read"):
        return source

    if source == "-":
        return sys.stdin

    if not agent:
        agent = USER_AGENT

    # try to open with urllib2 (to use optional headers)
    request = urllib2.Request(source)
    if etag:
        request.add_header("If-None-Match", etag)
    if modified:
        request.add_header("If-Modified-Since", format_http_date(modified))
    request.add_header("User-Agent", agent)
    if referrer:
        request.add_header("Referer", referrer)
    request.add_header("Accept-encoding", "gzip")
    opener = urllib2.build_opener(FeedURLHandler())
    opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
    try:
        return opener.open(request)
    except:
        # source is not a valid URL, but it might be a valid filename
        pass

    # try to open with native open function (if source is a filename)
    try:
        return open(source)
    except:
        pass

    # treat source as string
    return StringIO.StringIO(str(source))
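
# Usage sketch (hypothetical sources); each form returns a stream:
#   f = open_resource('http://example.com/index.xml', agent=USER_AGENT)
#   f = open_resource('/path/to/local/feed.xml')
#   f = open_resource('<rss version="2.0">...</rss>')  # raw data as a string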

def parse(uri, etag=None, modified=None, agent=None, referrer=None):
    r = FeedParser()
    f = open_resource(uri, etag=etag, modified=modified, agent=agent, referrer=referrer)
    data = f.read()
    if hasattr(f, "headers"):
        if f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()
            except:
                # some feeds claim to be gzipped but they're not, so we get garbage
                data = ''
    r.feed(data)
    result = {"channel": r.channel, "items": r.items}
    newEtag = get_etag(f)
    if newEtag: result["etag"] = newEtag
    elif etag: result["etag"] = etag
    newModified = get_modified(f)
    if newModified: result["modified"] = newModified
    elif modified: result["modified"] = modified
    if hasattr(f, "url"):
        result["url"] = f.url
    if hasattr(f, "headers"):
        result["headers"] = f.headers.dict
    if hasattr(f, "status"):
        result["status"] = f.status
    elif hasattr(f, "url"):
        result["status"] = 200
    # get the xml encoding
    if result.get('encoding', '') == '':
        xmlheaderRe = re.compile(r'<\?.*encoding="(.*)".*\?>')
        match = xmlheaderRe.match(data)
        if match:
            result['encoding'] = match.groups()[0].lower()
    f.close()
    return result
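
# Conditional-GET usage sketch (hypothetical URL; cache the etag/modified
# values between runs, per the 2.0/2.0.1 history entries above):
#   result = parse('http://example.com/index.xml')
#   for item in result['items']:
#       print item.get('title'), item.get('link')
#   etag, mod = result.get('etag'), result.get('modified')
#   # ...on the next run...
#   result = parse('http://example.com/index.xml', etag=etag, modified=mod)
#   # empty 'channel' and 'items' here mean "not modified" (or an HTTP error)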

TEST_SUITE = ('http://www.pocketsoap.com/rssTests/rss1.0withModules.xml',
              'http://www.pocketsoap.com/rssTests/rss1.0withModulesNoDefNS.xml',
              'http://www.pocketsoap.com/rssTests/rss1.0withModulesNoDefNSLocalNameClash.xml',
              'http://www.pocketsoap.com/rssTests/rss2.0noNSwithModules.xml',
              'http://www.pocketsoap.com/rssTests/rss2.0noNSwithModulesLocalNameClash.xml',
              'http://www.pocketsoap.com/rssTests/rss2.0NSwithModules.xml',
              'http://www.pocketsoap.com/rssTests/rss2.0NSwithModulesNoDefNS.xml',
              'http://www.pocketsoap.com/rssTests/rss2.0NSwithModulesNoDefNSLocalNameClash.xml')

if __name__ == '__main__':
    import sys
    if sys.argv[1:]:
        urls = sys.argv[1:]
    else:
        urls = TEST_SUITE
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print

"""
TODO
- textinput/textInput
- image
- author
- contributor
- comments
"""