--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/.gitignore Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,2 @@
+*.pyc
+.DS_Store
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/ChangeLog.txt Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,293 @@
+# web.py changelog
+
+## 2009-06-04 0.32
+
+* optional from_address to web.emailerrors
+* upgrade wsgiserver to CherryPy/3.1.2
+* support for extensions in Jinja2 templates (tx Zhang Huangbin)
+* support web.datestr for datetime.date objects also
+* support for lists in db queries
+* new: uniq and iterview
+* fix: set debug=False when application is run with mod_wsgi (tx Patrick Swieskowski) [Bug#370904](https://bugs.launchpad.net/webpy/+bug/370904)
+* fix: make web.commify work with decimals [Bug#317204](https://bugs.launchpad.net/webpy/+bug/317204)
+* fix: unicode issues with sqlite database [Bug#373219](https://bugs.launchpad.net/webpy/+bug/373219)
+* fix: urlquote url when the server is lighttpd [Bug#339858](https://bugs.launchpad.net/webpy/+bug/339858)
+* fix: issue with using date.format in templates
+* fix: use TOP instead of LIMIT in mssql database [Bug#324049](https://bugs.launchpad.net/webpy/+bug/324049)
+* fix: make sessions work well with expirations
+* fix: accept both list and tuple as arg values in form.Dropdown [Bug#314970](https://bugs.launchpad.net/webpy/+bug/314970)
+* fix: match parenthesis when parsing `for` statement in templates
+* fix: fix python 2.3 compatibility
+* fix: ignore dot folders when compiling templates (tx Stuart Langridge)
+* fix: don't consume KeyboardInterrupt and SystemExit errors
+* fix: make application work well with iterators
+
+## 2008-12-10: 0.31
+
+* new: browser module
+* new: test utilities
+* new: ShelfStore
+* fix: web.cookies error when default is None
+* fix: paramstyle for OracleDB (tx kromakey)
+* fix: performance issue in SQLQuery.join
+* fix: use wsgi.url_scheme to find ctx.protocol
+
+## 2008-12-06: 0.3
+
+* new: replace print with return (<i>backward-incompatible</i>)
+* new: application framework (<i>backward-incompatible</i>)
+* new: modular database system (<i>backward-incompatible</i>)
+* new: templetor reimplementation
+* new: better unicode support
+* new: debug mode (web.config.debug)
+* new: better db pooling
+* new: sessions
+* new: support for GAE
+* new: etag support
+* new: web.openid module
+* new: web.nthstr
+* fix: various form.py fixes
+* fix: python 2.6 compatibility
+* fix: file uploads are not loaded into memory
+* fix: SQLLiteral issue (Bug#180027)
+* change: web.background is moved to experimental (<i>backward-incompatible</i>)
+* improved API doc generation (tx Colin Rothwell)
+
+## 2008-01-19: 0.23
+
+* fix: for web.background gotcha ([133079](http://bugs.launchpad.net/webpy/+bug/133079))
+* fix: for postgres unicode bug ([177265](http://bugs.launchpad.net/webpy/+bug/177265))
+* fix: web.profile behavior in python 2.5 ([133080](http://bugs.launchpad.net/webpy/+bug/133080))
+* fix: only uppercase HTTP methods are allowed. ([176415](http://bugs.launchpad.net/webpy/+bug/176415))
+* fix: transaction error in with statement ([125118](http://bugs.launchpad.net/webpy/+bug/125118))
+* fix: fix in web.reparam ([162085](http://bugs.launchpad.net/webpy/+bug/162085))
+* fix: various unicode issues ([137042](http://bugs.launchpad.net/webpy/+bug/137042), [180510](http://bugs.launchpad.net/webpy/+bug/180510), [180549](http://bugs.launchpad.net/webpy/+bug/180549), [180653](http://bugs.launchpad.net/webpy/+bug/180653))
+* new: support for https
+* new: support for secure cookies
+* new: sendmail
+* new: htmlunquote
+
+## 2007-08-23: 0.22
+
+* compatibility with new DBUtils API ([122112](https://bugs.launchpad.net/webpy/+bug/122112))
+* fix reloading ([118683](https://bugs.launchpad.net/webpy/+bug/118683))
+* fix compatibility between `changequery` and `redirect` ([118234](https://bugs.launchpad.net/webpy/+bug/118234))
+* fix relative URI in `web.redirect` ([118236](https://bugs.launchpad.net/webpy/+bug/118236))
+* fix `ctx._write` support in built-in HTTP server ([121908](https://bugs.launchpad.net/webpy/+bug/121908))
+* fix `numify` strips things after '.'s ([118644](https://bugs.launchpad.net/webpy/+bug/118644))
+* fix various unicode issues ([114703](https://bugs.launchpad.net/webpy/+bug/114703), [120644](https://bugs.launchpad.net/webpy/+bug/120644), [124280](https://bugs.launchpad.net/webpy/+bug/124280))
+
+## 2007-05-28: 0.21
+
+* <strong>security fix:</strong> prevent bad characters in headers
+* support for cheetah template reloading
+* support for form validation
+* new `form.File`
+* new `web.url`
+* fix rendering issues with hidden and button inputs
+* fix 2.3 incompatibility with `numify`
+* fix multiple headers with same name
+* fix web.redirect issues when homepath is not /
+* new CherryPy wsgi server
+* new nested transactions
+* new sqlliteral
+
+## 2006-05-09: 0.138
+
+* New function: `intget`
+* New function: `datestr`
+* New function: `validaddr`
+* New function: `sqlwhere`
+* New function: `background`, `backgrounder`
+* New function: `changequery`
+* New function: `flush`
+* New function: `load`, `unload`
+* New variable: `loadhooks`, `unloadhooks`
+* Better docs; generating [docs](documentation) from web.py now
+* global variable `REAL_SCRIPT_NAME` can now be used to work around lighttpd madness
+* fastcgi/scgi servers now can listen on sockets
+* `output` now encodes Unicode
+* `input` now takes optional `_method` argument (see the sketch after this list)
+* <strong>Potentially-incompatible change:</strong> `input` now returns `badrequest` automatically when `requireds` aren't found
+* `storify` now takes lists and dictionaries as requests (see docs)
+* `redirect` now blanks any existing output
+* Quote SQL better when `db_printing` is on
+* Fix delay in `nomethod`
+* Fix `urlquote` to encode better.
+* Fix 2.3 incompatibility with `iters` (tx ??)
+* Fix duplicate headers
+* Improve `storify` docs
+* Fix `IterBetter` to raise IndexError, not KeyError
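+
+A rough sketch of the new `input` and `storify` call forms mentioned above (the handler context is hypothetical):
+
+    i = web.input(_method='get')           # look at GET parameters only, even in a POST handler
+    d = web.storify({'name': ['web.py']})  # plain lists/dicts are now accepted as the "request"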
+
+## 2006-03-27: 0.137
+
+* Add function `dictfindall` (tx Steve Huffman)
+* Add support to `autodelegate` for arguments
+* Add functions `httpdate` and `parsehttpdate`
+* Add function `modified`
+* Add support for FastCGI server mode
+* Clarify `dictadd` documentation (tx Steve Huffman)
+* Changed license to public domain
+* Clean up to use `ctx` and `env` instead of `context` and `environ`
+* Improved support for PUT, DELETE, etc. (tx list)
+* Fix `ctx.fullpath` (tx Jesir Vargas)
+* Fix sqlite support (tx Dubhead)
+* Fix documentation bug in `lstrips` (tx Gregory Petrosyan)
+* Fix support for IPs and ports (1/2 tx Jesir Vargas)
+* Fix `ctx.fullpath` (tx Jesir Vargas)
+* Fix sqlite support (tx Dubhead)
+* Fix documentation bug in `lstrips` (tx Gregory Petrosyan)
+* Fix `iters` bug with sets
+* Fix some breakage introduced by Vargas's patch
+* Fix `sqlors` bug
+* Fix various small style things (tx Jesir Vargas)
+* Fix bug with `input` ignoring GET input
+
+## 2006-02-22: 0.136 (svn)
+
+* Major code cleanup (tx to Jesir Vargas for the patch).
+
+## 2006-02-15: 0.135
+
+* Really fix that mysql regression (tx Sean Leach).
+
+## 2006-02-15: 0.134
+
+* The `StopIteration` exception is now caught. This can be used by functions that, for example, check whether a user is logged in; if not, they can output a message with a login box and raise `StopIteration`, preventing the caller from executing (see the sketch after this list).
+* Fix some documentation bugs.
+* Fix mysql regression (tx mrstone).
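+
+A minimal sketch of the `StopIteration` pattern described above (print-style API of this era; `logged_in` and `print_login_box` are made-up helpers):
+
+    def require_login():
+        if not logged_in():
+            print_login_box()       # show the message with a login box
+            raise StopIteration     # web.py catches this; the caller stops here
+
+    class index:
+        def GET(self):
+            require_login()
+            print "secret stuff"    # only reached when logged in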
+
+## 2006-02-12: 0.133
+
+* Docstrings! (tx numerous, esp. Jonathan Mark (for the patch) and Guido van Rossum (for the prod))
+* Add `set` to web.iters.
+* Make the `len` returned by `query` an int (tx ??).
+* <strong>Backwards-incompatible change:</strong> `base` now called `prefixurl`.
+* <strong>Backwards-incompatible change:</strong> `autoassign` now takes `self` and `locals()` as arguments.
+
+## 2006-02-07: 0.132
+
+* New variable `iters` is now a listing of possible list-like types (currently list, tuple, and, if it exists, Set).
+* New function `dictreverse` turns `{1:2}` into `{2:1}` (see the example after this list).
+* `Storage` now a dictionary subclass.
+* `tryall` now takes an optional prefix of functions to run.
+* `sqlors` has various improvements.
+* Fix a bunch of DB API bugs.
+* Fix bug with `storify` when it received multiple inputs (tx Ben Woosley).
+* Fix bug with returning a generator (tx Zbynek Winkler).
+* Fix bug where len returned a long on query results (tx F.S).
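+
+A quick illustration of `dictreverse` and the `Storage` change (interactive-style sketch):
+
+    web.dictreverse({1: 2})   # -> {2: 1}
+    s = web.Storage(x=1)      # Storage is now a dict subclass,
+    s.x, s['x'], s.keys()     # so attribute and dictionary access both work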
+
+
+## 2006-01-31: 0.131 (not officially released)
+
+* New function `_interpolate` used internally for interpolating strings.
+* Redone database API. `select`, `insert`, `update`, and `delete` all made consistent. Database queries can now do more complicated expressions like `$foo.bar` and `${a+b}`. You now have to explicitly pass the dictionary to look up variables in; pass `vars=locals()` to get the old behaviour of looking up local variables (see the example after this list).
+* New functions `sqllist` and `sqlors` generate certain kinds of SQL.
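+
+A rough sketch of the reworked query style (table and column names are made up):
+
+    name = 'Bob'
+    users = web.select('person', vars=locals(), where='name = $name')
+    web.update('person', where='name = $name', vars=locals(), email='bob@example.com')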
+
+## 2006-01-30: 0.13
+
+* New functions `found`, `seeother`, and `tempredirect` now let you do other kinds of redirects. `redirect` now also takes an optional status parameter. (tx many)
+* New functions `expires` and `lastmodified` make it easy to send those headers.
+* New function `gone` returns a 410 Gone (tx David Terrell).
+* New function `urlquote` applies url encoding to a string.
+* New function `iterbetter` wraps an iterator and allows you to index into it with `__getitem__` (see the sketch after this list).
+* Have `query` return an `iterbetter` instead of an iterator.
+* Have `debugerror` show tracebacks with the innermost frame first.
+* Add `__hash__` function to `threadeddict` (and thus, `ctx`).
+* Add `context.host` value for the requested host name.
+* Add option `db_printing` that prints database queries and the time they take.
+* Add support for database pooling (tx Steve Huffman).
+* Add support for passing values to functions called by `handle`. If you do `('foo', 'value')` it will add `'value'` as an argument when it calls `foo`.
+* Add support for scgi (tx David Terrell for the patch).
+* Add support for web.py functions that are iterators (tx Brendan O'Connor for the patch).
+* Use new database cursors on each call instead of reusing one.
+* `setcookie` now takes an optional `domain` argument.
+* Fix bug in autoassign.
+* Fix bug where `debugerror` would break on objects it couldn't display.
+* Fix bug where you couldn't do `#include`s inline.
+* Fix bug with `reloader` and database calls.
+* Fix bug with `reloader` and base templates.
+* Fix bug with CGI mode on certain operating systems.
+* Fix bug where `debug` would crash if called outside a request.
+* Fix bug with `context.ip` giving weird values with proxies.
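+
+Two small examples for the items above (the query and cookie values are made up):
+
+    rows = web.query('SELECT * FROM person')               # now returns an iterbetter
+    first = rows[0]                                        # so indexing into the result works
+
+    web.setcookie('name', 'value', domain='.example.com')  # new optional domain argument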
+
+## 2006-01-29: 0.129
+
+* Add Python 2.2 support.
+
+## 2006-01-28: 0.128
+
+* Fix typo in `web.profile`.
+
+## 2006-01-28: 0.127
+
+* Fix bug in error message if invalid dbn is sent (tx Panos Laganakos).
+
+## 2006-01-27: 0.126
+
+* Fix typos in Content-Type headers (tx Beat Bolli for the prod).
+
+## 2006-01-22: 0.125
+
+* Support Cheetah 2.0.
+
+## 2006-01-22: 0.124
+
+* Fix spacing bug (tx Tommi Raivio for the prod).
+
+## 2006-01-16: 0.123
+
+* Fix bug with CGI usage (tx Eddie Sowden for the prod).
+
+## 2006-01-14: 0.122
+
+* Allow DELETEs from `web.query` (tx Joost Molenaar for the prod).
+
+## 2006-01-08: 0.121
+
+* Allow import of submodules like `pkg.mod.cn` (tx Sridhar Ratna).
+* Fix a bug in `update` (tx Sergey Khenkin).
+
+## 2006-01-05: 0.12
+
+* <strong>Backwards-incompatible change:</strong> `db_parameters` is now a dictionary.
+* <strong>Backwards-incompatible change:</strong> `sumdicts` is now `dictadd`.
+* Add support for PyGreSQL, MySQL (tx Hallgrimur H. Gunnarsson).
+* Use HTML for non-Cheetah error message.
+* New function `htmlquote()`.
+* New function `tryall()`.
+* `ctx.output` can now be set to a generator. (tx Brendan O'Connor)
+
+## 2006-01-04: 0.117
+
+* Add support for psycopg 1.x. (tx Gregory Price)
+
+## 2006-01-04: 0.116
+
+* Add support for Python 2.3. (tx Evan Jones)
+
+## 2006-01-04: 0.115
+
+* Fix some bugs where database queries weren't reparameterized. Oops!
+* Fix a bug where `run()` wasn't getting the right functions.
+* Remove a debug statement accidentally left in.
+* Allow `storify` to be used on dictionaries. (tx Joseph Trent)
+
+## 2006-01-04: 0.114
+
+* Make `reloader` work on Windows. (tx manatlan)
+* Fix some small typos that affected colorization. (tx Gregory Price)
+
+## 2006-01-03: 0.113
+
+* Reorganize `run()` internals so mod_python can be used. (tx Nicholas Matsakis)
+
+## 2006-01-03: 0.112
+
+* Make `reloader` work when `code.py` is called with a full path. (tx David Terrell)
+
+## 2006-01-03: 0.111
+
+* Fixed bug in `strips()`. (tx Michael Josephson)
+
+## 2006-01-03: 0.11
+
+* First public version.
+
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/LICENSE.txt Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,4 @@
+web.py is in the public domain; it can be used for whatever purpose with absolutely no restrictions.
+
+The CherryPy WSGI server included in web.py as web.wsgiserver is licensed under the CherryPy license. See web/wsgiserver/LICENSE.txt for more details.
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/experimental/background.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,43 @@
+"""Helpers functions to run log-running tasks."""
+from web import utils
+from web import webapi as web
+
+def background(func):
+ """A function decorator to run a long-running function as a background thread."""
+ def internal(*a, **kw):
+ web.data() # cache it
+
+ tmpctx = web._context[threading.currentThread()]
+ web._context[threading.currentThread()] = utils.storage(web.ctx.copy())
+
+ def newfunc():
+ web._context[threading.currentThread()] = tmpctx
+ func(*a, **kw)
+ myctx = web._context[threading.currentThread()]
+ for k in myctx.keys():
+ if k not in ['status', 'headers', 'output']:
+ try: del myctx[k]
+ except KeyError: pass
+
+ t = threading.Thread(target=newfunc)
+ background.threaddb[id(t)] = t
+ t.start()
+ web.ctx.headers = []
+ return seeother(changequery(_t=id(t)))
+ return internal
+background.threaddb = {}
+
+def backgrounder(func):
+ def internal(*a, **kw):
+ i = web.input(_method='get')
+ if '_t' in i:
+ try:
+ t = background.threaddb[int(i._t)]
+ except KeyError:
+ return web.notfound()
+ web._context[threading.currentThread()] = web._context[t]
+ return
+ else:
+ return func(*a, **kw)
+ return internal
+
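+# Rough usage sketch (assuming the old print-style API these helpers target;
+# the names below are made up):
+#
+#     @background
+#     def run_report():
+#         ...                # long-running work; its output stays in the thread's ctx
+#
+#     class report:
+#         @backgrounder
+#         def GET(self):
+#             run_report()   # spawns the thread and redirects to ?_t=<thread id>;
+#                            # later requests with _t show that thread's output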
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/experimental/migration.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,99 @@
+"""Migration script to run web.py 0.23 programs using 0.3.
+
+Import this module at the beginning of your program.
+"""
+import web
+import sys
+
+def setup_database():
+ if web.config.get('db_parameters'):
+ db = web.database(**web.config.db_parameters)
+ web.insert = db.insert
+ web.select = db.select
+ web.update = db.update
+ web.delete = db.delete
+ web.query = db.query
+
+ def transact():
+ t = db.transaction()
+ web.ctx.setdefault('transaction_stack', []).append(t)
+
+ def rollback():
+ stack = web.ctx.get('transaction_stack')
+ t = stack and stack.pop()
+ t and t.rollback()
+
+ def commit():
+ stack = web.ctx.get('transaction_stack')
+ t = stack and stack.pop()
+ t and t.commit()
+
+ web.transact = transact
+ web.rollback = rollback
+ web.commit = commit
+
+web.loadhooks = web.webapi.loadhooks = {}
+web._loadhooks = web.webapi._loadhooks = {}
+web.unloadhooks = web.webapi.unloadhooks = {}
+
+def load():
+ setup_database()
+
+web.load = load
+
+def run(urls, fvars, *middleware):
+ setup_database()
+
+ def stdout_processor(handler):
+ handler()
+ return web.ctx.get('output', '')
+
+ def hook_processor(handler):
+ for h in web.loadhooks.values() + web._loadhooks.values(): h()
+ output = handler()
+ for h in web.unloadhooks.values(): h()
+ return output
+
+ app = web.application(urls, fvars)
+ app.add_processor(stdout_processor)
+ app.add_processor(hook_processor)
+ app.run(*middleware)
+
+class _outputter:
+ """Wraps `sys.stdout` so that print statements go into the response."""
+ def __init__(self, file): self.file = file
+ def write(self, string_):
+ if hasattr(web.ctx, 'output'):
+ return output(string_)
+ else:
+ self.file.write(string_)
+ def __getattr__(self, attr): return getattr(self.file, attr)
+ def __getitem__(self, item): return self.file[item]
+
+def output(string_):
+ """Appends `string_` to the response."""
+ string_ = web.utf8(string_)
+ if web.ctx.get('flush'):
+ web.ctx._write(string_)
+ else:
+ web.ctx.output += str(string_)
+
+def _capturedstdout():
+ sysstd = sys.stdout
+ while hasattr(sysstd, 'file'):
+ if isinstance(sys.stdout, _outputter): return True
+ sysstd = sysstd.file
+ if isinstance(sys.stdout, _outputter): return True
+ return False
+
+if not _capturedstdout():
+ sys.stdout = _outputter(sys.stdout)
+
+web.run = run
+
+class Stowage(web.storage):
+ def __str__(self):
+ return self._str
+
+web.template.Stowage = web.template.stowage = Stowage
+
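+# Usage sketch: import this module right after web in a 0.23-style program and
+# keep using the old API (the database parameters below are made up).
+#
+#     import web
+#     import migration    # installs web.run, web.select, web.transact, ...
+#
+#     web.config.db_parameters = dict(dbn='sqlite', db='app.db')
+#     web.run(urls, globals())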
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/experimental/pwt.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,96 @@
+import web
+import simplejson, sudo
+urls = (
+ '/sudo', 'sudoku',
+ '/length', 'length',
+)
+
+
+class pwt(object):
+ _inFunc = False
+ updated = {}
+ page = """
+<script src="/static/prototype.js"></script>
+<script src="/static/behaviour.js"></script>
+<script>
+Behaviour.register({'input': function (e) {
+ e.onmouseup = e.onkeyup = e.onchange = function () { send(e) }
+}})
+</script>
+
+<form name="main" onsubmit="return false;">%s</form>
+
+<script>
+function send(e) {
+ ajax = new Ajax.Request(document.location, {method:'post', parameters:
+ Form.serialize(document.forms.main)
+ });
+}
+
+function receive(d) {
+ $H(d).keys().each(function (key) {
+ v = d[key];
+ k = document.forms.main[key];
+
+ if (k) k.value = v;
+ else $(key).innerHTML = v;
+ })
+}
+</script>
+"""
+
+ def GET(self):
+ web.header('Content-Type', 'text/html')
+ print self.page % self.form()
+
+ def POST(self):
+ i = web.input()
+ if '_' in i: del i['_']
+ #for k, v in i.iteritems(): setattr(self, k, v)
+
+ self._inFunc = True
+ self.work(**i)
+ self._inFunc = False
+
+ web.header('Content-Type', 'text/javascript')
+ print 'receive('+simplejson.dumps(self.updated)+');'
+
+ def __setattr__(self, k, v):
+ if self._inFunc and k != '_inFunc':
+ self.updated[k] = v
+ object.__setattr__(self, k, v)
+
+class sudoku(pwt):
+ def form(self):
+ import sudo
+ out = ''
+ n = 0
+ for i in range(9):
+ for j in range(9):
+ out += '<input type="text" size="1" name="%s" />' % (sudo.squares[n])
+ n += 1
+ out += '<br />'
+
+ return out
+
+ def work(self, **kw):
+ values = dict((s, sudo.digits) for s in sudo.squares)
+ for k, v in kw.iteritems():
+ if v:
+ sudo.assign(values, k, v)
+
+ for k, v in values.iteritems():
+ if len(v) == 1:
+ setattr(self, k, v)
+
+ return values
+
+class length(pwt):
+ def form(self):
+ return '<p id="output"> </p><input type="range" name="n" value="0" />'
+
+    def work(self, n=0):
+        self.output = ('a' * web.intget(n, 0) or ' ')
+
+if __name__ == "__main__":
+ web.run(urls, globals(), web.reloader)
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/experimental/untwisted.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,134 @@
+import random
+
+from twisted.internet import reactor, defer
+from twisted.web import http
+
+import simplejson
+
+import web
+
+class Request(http.Request):
+ def process(self):
+ self.content.seek(0, 0)
+ env = {
+ 'REMOTE_ADDR': self.client.host,
+ 'REQUEST_METHOD': self.method,
+ 'PATH_INFO': self.path,
+ 'CONTENT_LENGTH': web.intget(self.getHeader('content-length'), 0),
+ 'wsgi.input': self.content
+ }
+ if '?' in self.uri:
+ env['QUERY_STRING'] = self.uri.split('?', 1)[1]
+
+ for k, v in self.received_headers.iteritems():
+ env['HTTP_' + k.upper()] = v
+
+ if self.path.startswith('/static/'):
+ f = web.lstrips(self.path, '/static/')
+ assert '/' not in f
+ #@@@ big security hole
+ self.write(file('static/' + f).read())
+ return self.finish()
+
+ web.webapi._load(env)
+ web.ctx.trequest = self
+ result = self.actualfunc()
+ self.setResponseCode(int(web.ctx.status.split()[0]))
+ for (h, v) in web.ctx.headers:
+ self.setHeader(h, v)
+ self.write(web.ctx.output)
+ if not web.ctx.get('persist'):
+ self.finish()
+
+class Server(http.HTTPFactory):
+ def __init__(self, func):
+ self.func = func
+
+ def buildProtocol(self, addr):
+ """Generate a channel attached to this site.
+ """
+ channel = http.HTTPFactory.buildProtocol(self, addr)
+ class MyRequest(Request):
+ actualfunc = staticmethod(self.func)
+ channel.requestFactory = MyRequest
+ channel.site = self
+ return channel
+
+def runtwisted(func):
+ reactor.listenTCP(8086, Server(func))
+ reactor.run()
+
+def newrun(inp, fvars):
+ print "Running on http://0.0.0.0:8086/"
+ runtwisted(web.webpyfunc(inp, fvars, False))
+
+def iframe(url):
+ return """
+ <iframe height="0" width="0" style="display: none" src="%s"/></iframe>
+ """ % url #("http://%s.ajaxpush.lh.theinfo.org:8086%s" % (random.random(), url))
+
+class Feed:
+ def __init__(self):
+ self.sessions = []
+
+ def subscribe(self):
+ request = web.ctx.trequest
+ self.sessions.append(request)
+ request.connectionLost = lambda reason: self.sessions.remove(request)
+ web.ctx.persist = True
+
+ def publish(self, text):
+ for x in self.sessions:
+ x.write(text)
+
+class JSFeed(Feed):
+ def __init__(self, callback="callback"):
+ Feed.__init__(self)
+ self.callback = callback
+
+ def publish(self, obj):
+ web.debug("publishing")
+ Feed.publish(self,
+ '<script type="text/javascript">window.parent.%s(%s)</script>' % (self.callback, simplejson.dumps(obj) +
+ " " * 2048))
+
+if __name__ == "__main__":
+ mfeed = JSFeed()
+
+ urls = (
+ '/', 'view',
+ '/js', 'js',
+ '/send', 'send'
+ )
+
+ class view:
+ def GET(self):
+ print """
+<script type="text/javascript">
+function callback(item) {
+ document.getElementById('content').innerHTML += "<p>" + item + "</p>";
+}
+</script>
+
+<h2>Today's News</h2>
+
+<div id="content"></div>
+
+<h2>Contribute</h2>
+<form method="post" action="/send">
+ <textarea name="text"></textarea>
+ <input type="submit" value="send" />
+</form>
+<iframe id="foo" height="0" width="0" style="display: none" src="/js"/></iframe>
+ """
+
+ class js:
+ def GET(self):
+ mfeed.subscribe()
+
+ class send:
+ def POST(self):
+ mfeed.publish('<p>%s</p>' % web.input().text + (" " * 2048))
+ web.seeother('/')
+
+ newrun(urls, globals())
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/setup.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# ...
+
+from distutils.core import setup
+
+setup(name='web.py',
+ version='0.32',
+ description='web.py: makes web apps',
+ author='Aaron Swartz',
+ author_email='me@aaronsw.com',
+ maintainer='Anand Chitipothu',
+ maintainer_email='anandology@gmail.com',
+      url='http://webpy.org/',
+ packages=['web', 'web.wsgiserver', 'web.contrib'],
+ long_description="Think about the ideal way to write a web app. Write the code to make it happen.",
+ license="Public domain",
+ platforms=["any"],
+ )
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/README Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,27 @@
+# web.py unit tests
+
+## Setup
+
+All database tests expect a database named `webpy`, accessible with username `scott` and password `tiger`.
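+
+For reference, this is roughly the connection the tests attempt to make (see `test/webtest.py`; the sqlite and postgres tests use slightly different parameters):
+
+    import web
+    db = web.database(dbn='mysql', db='webpy', user='scott', pw='tiger')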
+
+## Running all tests
+
+To run all tests:
+
+ $ python test/alltests.py
+
+## Running individual tests
+
+To run all tests in a file:
+
+ $ python test/db.py
+
+To run all tests in a class:
+
+ $ python test/db.py SqliteTest
+
+To run a single test:
+
+ $ python test/db.py SqliteTest.testUnicode
+
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/alltests.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,8 @@
+import webtest
+
+def suite():
+ modules = ["doctests", "db", "application", "session"]
+ return webtest.suite(modules)
+
+if __name__ == "__main__":
+ webtest.main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/application.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,297 @@
+import webtest
+import time
+
+import web
+import urllib
+
+data = """
+import web
+
+urls = ("/", "%(classname)s")
+app = web.application(urls, globals(), autoreload=True)
+
+class %(classname)s:
+ def GET(self):
+ return "%(output)s"
+
+"""
+
+urls = (
+ "/iter", "do_iter",
+)
+app = web.application(urls, globals())
+
+class do_iter:
+ def GET(self):
+ yield 'hello, '
+ yield web.input(name='world').name
+
+ POST = GET
+
+def write(filename, data):
+ f = open(filename, 'w')
+ f.write(data)
+ f.close()
+
+class ApplicationTest(webtest.TestCase):
+ def test_reloader(self):
+ write('foo.py', data % dict(classname='a', output='a'))
+ import foo
+ app = foo.app
+
+ self.assertEquals(app.request('/').data, 'a')
+
+ # test class change
+ time.sleep(1)
+ write('foo.py', data % dict(classname='a', output='b'))
+ self.assertEquals(app.request('/').data, 'b')
+
+ # test urls change
+ time.sleep(1)
+ write('foo.py', data % dict(classname='c', output='c'))
+ self.assertEquals(app.request('/').data, 'c')
+
+ def testUppercaseMethods(self):
+ urls = ("/", "hello")
+ app = web.application(urls, locals())
+ class hello:
+ def GET(self): return "hello"
+ def internal(self): return "secret"
+
+ response = app.request('/', method='internal')
+ self.assertEquals(response.status, '405 Method Not Allowed')
+
+ def testRedirect(self):
+ urls = (
+ "/a", "redirect /hello/",
+ "/b/(.*)", r"redirect /hello/\1",
+ "/hello/(.*)", "hello"
+ )
+ app = web.application(urls, locals())
+ class hello:
+ def GET(self, name):
+ name = name or 'world'
+ return "hello " + name
+
+ response = app.request('/a')
+ self.assertEquals(response.status, '301 Moved Permanently')
+ self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/')
+
+ response = app.request('/a?x=2')
+ self.assertEquals(response.status, '301 Moved Permanently')
+ self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/?x=2')
+
+ response = app.request('/b/foo?x=2')
+ self.assertEquals(response.status, '301 Moved Permanently')
+ self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/foo?x=2')
+
+ def test_subdirs(self):
+ urls = (
+ "/(.*)", "blog"
+ )
+ class blog:
+ def GET(self, path):
+ return "blog " + path
+ app_blog = web.application(urls, locals())
+
+ urls = (
+ "/blog", app_blog,
+ "/(.*)", "index"
+ )
+ class index:
+ def GET(self, path):
+ return "hello " + path
+ app = web.application(urls, locals())
+
+ self.assertEquals(app.request('/blog/foo').data, 'blog foo')
+ self.assertEquals(app.request('/foo').data, 'hello foo')
+
+ def processor(handler):
+ return web.ctx.path + ":" + handler()
+ app.add_processor(processor)
+ self.assertEquals(app.request('/blog/foo').data, '/blog/foo:blog foo')
+
+ def test_subdomains(self):
+ def create_app(name):
+ urls = ("/", "index")
+ class index:
+ def GET(self):
+ return name
+ return web.application(urls, locals())
+
+ urls = (
+ "a.example.com", create_app('a'),
+ "b.example.com", create_app('b'),
+ ".*.example.com", create_app('*')
+ )
+ app = web.subdomain_application(urls, locals())
+
+ def test(host, expected_result):
+ result = app.request('/', host=host)
+ self.assertEquals(result.data, expected_result)
+
+ test('a.example.com', 'a')
+ test('b.example.com', 'b')
+ test('c.example.com', '*')
+ test('d.example.com', '*')
+
+ def test_redirect(self):
+ urls = (
+ "/(.*)", "blog"
+ )
+ class blog:
+ def GET(self, path):
+ if path == 'foo':
+ raise web.seeother('/login', absolute=True)
+ else:
+ raise web.seeother('/bar')
+ app_blog = web.application(urls, locals())
+
+ urls = (
+ "/blog", app_blog,
+ "/(.*)", "index"
+ )
+ class index:
+ def GET(self, path):
+ return "hello " + path
+ app = web.application(urls, locals())
+
+ response = app.request('/blog/foo')
+ self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/login')
+
+ response = app.request('/blog/foo', env={'SCRIPT_NAME': '/x'})
+ self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/x/login')
+
+ response = app.request('/blog/foo2')
+ self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/blog/bar')
+
+ response = app.request('/blog/foo2', env={'SCRIPT_NAME': '/x'})
+ self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/x/blog/bar')
+
+ def test_processors(self):
+ urls = (
+ "/(.*)", "blog"
+ )
+ class blog:
+ def GET(self, path):
+ return 'blog ' + path
+
+ state = web.storage(x=0, y=0)
+ def f():
+ state.x += 1
+
+ app_blog = web.application(urls, locals())
+ app_blog.add_processor(web.loadhook(f))
+
+ urls = (
+ "/blog", app_blog,
+ "/(.*)", "index"
+ )
+ class index:
+ def GET(self, path):
+ return "hello " + path
+ app = web.application(urls, locals())
+ def g():
+ state.y += 1
+ app.add_processor(web.loadhook(g))
+
+ app.request('/blog/foo')
+ assert state.x == 1 and state.y == 1, repr(state)
+ app.request('/foo')
+ assert state.x == 1 and state.y == 2, repr(state)
+
+ def testUnicodeInput(self):
+ urls = (
+ "(/.*)", "foo"
+ )
+ class foo:
+ def GET(self, path):
+ i = web.input(name='')
+ return repr(i.name)
+
+ def POST(self, path):
+ if path == '/multipart':
+ i = web.input(file={})
+ return i.file.value
+ else:
+ i = web.input()
+ return repr(dict(i))
+
+ app = web.application(urls, locals())
+
+ def f(name):
+ path = '/?' + urllib.urlencode({"name": name.encode('utf-8')})
+ self.assertEquals(app.request(path).data, repr(name))
+
+ f(u'\u1234')
+ f(u'foo')
+
+ response = app.request('/', method='POST', data=dict(name='foo'))
+ self.assertEquals(response.data, "{'name': u'foo'}")
+
+        data = '--boundary\r\nContent-Disposition: form-data; name="x"\r\n\r\nfoo\r\n--boundary\r\nContent-Disposition: form-data; name="file"; filename="a.txt"\r\nContent-Type: text/plain\r\n\r\na\r\n--boundary--\r\n'
+ headers = {'Content-Type': 'multipart/form-data; boundary=boundary'}
+ response = app.request('/multipart', method="POST", data=data, headers=headers)
+ self.assertEquals(response.data, 'a')
+
+ def testCustomNotFound(self):
+ urls_a = ("/", "a")
+ urls_b = ("/", "b")
+
+ app_a = web.application(urls_a, locals())
+ app_b = web.application(urls_b, locals())
+
+ app_a.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 1")
+
+ urls = (
+ "/a", app_a,
+ "/b", app_b
+ )
+ app = web.application(urls, locals())
+
+ def assert_notfound(path, message):
+ response = app.request(path)
+ self.assertEquals(response.status.split()[0], "404")
+ self.assertEquals(response.data, message)
+
+ assert_notfound("/a/foo", "not found 1")
+ assert_notfound("/b/foo", "not found")
+
+ app.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 2")
+ assert_notfound("/a/foo", "not found 1")
+ assert_notfound("/b/foo", "not found 2")
+
+ def testIter(self):
+ self.assertEquals(app.request('/iter').data, 'hello, world')
+ self.assertEquals(app.request('/iter?name=web').data, 'hello, web')
+
+ self.assertEquals(app.request('/iter', method='POST').data, 'hello, world')
+ self.assertEquals(app.request('/iter', method='POST', data='name=web').data, 'hello, web')
+
+ def testUnload(self):
+ x = web.storage(a=0)
+
+ urls = (
+ "/foo", "foo",
+ "/bar", "bar"
+ )
+ class foo:
+ def GET(self):
+ return "foo"
+ class bar:
+ def GET(self):
+ raise web.notfound()
+
+ app = web.application(urls, locals())
+ def unload():
+ x.a += 1
+ app.add_processor(web.unloadhook(unload))
+
+ app.request('/foo')
+ self.assertEquals(x.a, 1)
+
+ app.request('/bar')
+ self.assertEquals(x.a, 2)
+
+if __name__ == '__main__':
+ webtest.main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/browser.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,64 @@
+import webtest
+import web
+
+urls = (
+ "/", "index",
+ "/hello/(.*)", "hello",
+ "/cookie", "cookie",
+ "/setcookie", "setcookie",
+ "/redirect", "redirect",
+)
+app = web.application(urls, globals())
+
+class index:
+ def GET(self):
+ return "welcome"
+
+class hello:
+ def GET(self, name):
+ name = name or 'world'
+ return "hello, " + name + '!'
+
+class cookie:
+ def GET(self):
+ return ",".join(sorted(web.cookies().keys()))
+
+class setcookie:
+ def GET(self):
+ i = web.input()
+ for k, v in i.items():
+ web.setcookie(k, v)
+ return "done"
+
+class redirect:
+ def GET(self):
+ i = web.input(url='/')
+ raise web.seeother(i.url)
+
+class BrowserTest(webtest.TestCase):
+ def testCookies(self):
+ b = app.browser()
+ b.open('http://0.0.0.0/setcookie?x=1&y=2')
+ b.open('http://0.0.0.0/cookie')
+ self.assertEquals(b.data, 'x,y')
+
+ def testNotfound(self):
+ b = app.browser()
+ b.open('http://0.0.0.0/notfound')
+ self.assertEquals(b.status, 404)
+
+ def testRedirect(self):
+ b = app.browser()
+
+ b.open('http://0.0.0.0:8080/redirect')
+ self.assertEquals(b.url, 'http://0.0.0.0:8080/')
+ b.open('http://0.0.0.0:8080/redirect?url=/hello/foo')
+ self.assertEquals(b.url, 'http://0.0.0.0:8080/hello/foo')
+
+ b.open('https://0.0.0.0:8080/redirect')
+ self.assertEquals(b.url, 'https://0.0.0.0:8080/')
+ b.open('https://0.0.0.0:8080/redirect?url=/hello/foo')
+ self.assertEquals(b.url, 'https://0.0.0.0:8080/hello/foo')
+
+if __name__ == "__main__":
+ webtest.main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/db.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,162 @@
+"""DB test"""
+import webtest
+import web
+
+class DBTest(webtest.TestCase):
+ dbname = 'postgres'
+ driver = None
+
+ def setUp(self):
+ self.db = webtest.setup_database(self.dbname, driver=self.driver)
+ self.db.query("CREATE TABLE person (name text, email text, active boolean)")
+
+ def tearDown(self):
+ # there might be some error with the current connection, delete from a new connection
+ self.db = webtest.setup_database(self.dbname, driver=self.driver)
+ self.db.query('DROP TABLE person')
+
+ def _testable(self):
+ try:
+ webtest.setup_database(self.dbname, driver=self.driver)
+ return True
+ except ImportError, e:
+ print >> web.debug, str(e), "(ignoring %s)" % self.__class__.__name__
+ return False
+
+ def testUnicode(self):
+ # Bug#177265: unicode queries throw errors
+ self.db.select('person', where='name=$name', vars={'name': u'\xf4'})
+
+ def assertRows(self, n):
+ result = self.db.select('person')
+ self.assertEquals(len(list(result)), n)
+
+ def testCommit(self):
+ t = self.db.transaction()
+ self.db.insert('person', False, name='user1')
+ t.commit()
+
+ t = self.db.transaction()
+ self.db.insert('person', False, name='user2')
+ self.db.insert('person', False, name='user3')
+ t.commit()
+
+ self.assertRows(3)
+
+ def testRollback(self):
+ t = self.db.transaction()
+ self.db.insert('person', False, name='user1')
+ self.db.insert('person', False, name='user2')
+ self.db.insert('person', False, name='user3')
+ t.rollback()
+ self.assertRows(0)
+
+ def testWrongQuery(self):
+ # It should be possible to run a correct query after getting an error from a wrong query.
+ try:
+ self.db.select('notthere')
+ except:
+ pass
+ self.db.select('person')
+
+ def testNestedTransactions(self):
+ t1 = self.db.transaction()
+ self.db.insert('person', False, name='user1')
+ self.assertRows(1)
+
+ t2 = self.db.transaction()
+ self.db.insert('person', False, name='user2')
+ self.assertRows(2)
+ t2.rollback()
+ self.assertRows(1)
+ t3 = self.db.transaction()
+ self.db.insert('person', False, name='user3')
+ self.assertRows(2)
+ t3.commit()
+ t1.commit()
+ self.assertRows(2)
+
+ def testPooling(self):
+ # can't test pooling if DBUtils is not installed
+ try:
+ import DBUtils
+ except ImportError:
+ return
+ db = webtest.setup_database(self.dbname, pooling=True)
+ self.assertEquals(db.ctx.db.__class__.__module__, 'DBUtils.PooledDB')
+ db.select('person', limit=1)
+
+ def test_multiple_insert(self):
+ db = webtest.setup_database(self.dbname)
+ db.multiple_insert('person', [dict(name='a'), dict(name='b')], seqname=False)
+
+ assert db.select("person", where="name='a'")
+ assert db.select("person", where="name='b'")
+
+ def test_result_is_unicode(self):
+ db = webtest.setup_database(self.dbname)
+ self.db.insert('person', False, name='user')
+ name = db.select('person')[0].name
+ self.assertEquals(type(name), unicode)
+
+ def testBoolean(self):
+ def t(active):
+ name ='name-%s' % active
+ self.db.insert('person', False, name=name, active=active)
+ a = self.db.select('person', where='name=$name', vars=locals())[0].active
+ self.assertEquals(a, active)
+ t(False)
+ t(True)
+
+class PostgresTest(DBTest):
+ dbname = "postgres"
+ driver = "psycopg2"
+
+class PostgresTest_psycopg(PostgresTest):
+ driver = "psycopg"
+
+class PostgresTest_pgdb(PostgresTest):
+ driver = "pgdb"
+
+class SqliteTest(DBTest):
+ dbname = "sqlite"
+ driver = "sqlite3"
+
+ def testNestedTransactions(self):
+        # nested transactions do not work with sqlite
+ pass
+
+class SqliteTest_pysqlite2(SqliteTest):
+ driver = "pysqlite2.dbapi2"
+
+class MySQLTest(DBTest):
+ dbname = "mysql"
+
+ def setUp(self):
+ self.db = webtest.setup_database(self.dbname)
+ # In mysql, transactions are supported only with INNODB engine.
+ self.db.query("CREATE TABLE person (name text, email text) ENGINE=INNODB")
+
+ def testBoolean(self):
+        # boolean datatype is not supported in MySQL (at least until v5.0)
+ pass
+
+del DBTest
+
+def is_test(cls):
+ import inspect
+ return inspect.isclass(cls) and webtest.TestCase in inspect.getmro(cls)
+
+# ignore db tests when the required db adapter is not found.
+for t in globals().values():
+ if is_test(t) and not t('_testable')._testable():
+ del globals()[t.__name__]
+del t
+
+try:
+ import DBUtils
+except ImportError, e:
+ print >> web.debug, str(e) + "(ignoring testPooling)"
+
+if __name__ == '__main__':
+ webtest.main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/doctests.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,20 @@
+"""Run all doctests in web.py.
+"""
+import webtest
+
+def suite():
+ modules = [
+ "web.application",
+ "web.db",
+ "web.http",
+ "web.net",
+ "web.session",
+ "web.template",
+ "web.utils",
+# "web.webapi",
+# "web.wsgi",
+ ]
+ return webtest.doctest_suite(modules)
+
+if __name__ == "__main__":
+ webtest.main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/session.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,74 @@
+import webtest
+import web
+import tempfile
+
+class SessionTest(webtest.TestCase):
+ def setUp(self):
+ app = web.auto_application()
+ session = self.make_session(app)
+ class count(app.page):
+ def GET(self):
+ session.count += 1
+ return str(session.count)
+
+ class reset(app.page):
+ def GET(self):
+ session.kill()
+ return ""
+
+ self.app = app
+ self.session = session
+
+ def make_session(self, app):
+        dir = tempfile.mkdtemp()
+        store = web.session.DiskStore(dir)
+ return web.session.Session(app, store, {'count': 0})
+
+ def testSession(self):
+ b = self.app.browser()
+ self.assertEquals(b.open('/count').read(), '1')
+ self.assertEquals(b.open('/count').read(), '2')
+ self.assertEquals(b.open('/count').read(), '3')
+ b.open('/reset')
+ self.assertEquals(b.open('/count').read(), '1')
+
+ def testParallelSessions(self):
+ b1 = self.app.browser()
+ b2 = self.app.browser()
+
+ b1.open('/count')
+
+ for i in range(1, 10):
+ self.assertEquals(b1.open('/count').read(), str(i+1))
+ self.assertEquals(b2.open('/count').read(), str(i))
+
+ def testBadSessionId(self):
+ b = self.app.browser()
+ self.assertEquals(b.open('/count').read(), '1')
+ self.assertEquals(b.open('/count').read(), '2')
+
+ cookie = b.cookiejar._cookies['0.0.0.0']['/']['webpy_session_id']
+ cookie.value = '/etc/password'
+ self.assertEquals(b.open('/count').read(), '1')
+
+class DBSessionTest(SessionTest):
+ """Session test with db store."""
+ def make_session(self, app):
+ db = webtest.setup_database("postgres")
+ #db.printing = True
+ db.query(""
+ + "CREATE TABLE session ("
+ + " session_id char(128) unique not null,"
+ + " atime timestamp default (current_timestamp at time zone 'utc'),"
+ + " data text)"
+ )
+ store = web.session.DBStore(db, 'session')
+ return web.session.Session(app, store, {'count': 0})
+
+ def tearDown(self):
+ # there might be some error with the current connection, delete from a new connection
+ self.db = webtest.setup_database("postgres")
+ self.db.query('DROP TABLE session')
+
+if __name__ == "__main__":
+ webtest.main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/test/webtest.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,20 @@
+"""webtest: test utilities.
+"""
+import sys, os
+
+# adding current directory to path to make sure local modules can be imported
+sys.path.insert(0, '.')
+
+from web.test import *
+
+def setup_database(dbname, driver=None, pooling=False):
+ if dbname == 'sqlite':
+ db = web.database(dbn=dbname, db='webpy.db', pooling=pooling, driver=driver)
+ elif dbname == 'postgres':
+ user = os.getenv('USER')
+ db = web.database(dbn=dbname, db='webpy', user=user, pw='', pooling=pooling, driver=driver)
+ else:
+ db = web.database(dbn=dbname, db='webpy', user='scott', pw='tiger', pooling=pooling, driver=driver)
+
+ db.printing = '-v' in sys.argv
+ return db
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/tools/_makedoc.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,81 @@
+import os
+import web
+
+class Parser:
+ def __init__(self):
+ self.mode = 'normal'
+ self.text = ''
+
+ def go(self, pyfile):
+ for line in file(pyfile):
+ if self.mode == 'in def':
+ self.text += ' ' + line.strip()
+ if line.strip().endswith(':'):
+ if self.definition(self.text):
+ self.text = ''
+ self.mode = 'in func'
+ else:
+ self.text = ''
+ self.mode = 'normal'
+
+ elif self.mode == 'in func':
+ if '"""' in line:
+ self.text += line.strip().strip('"')
+ self.mode = 'in doc'
+ if line.count('"""') == 2:
+ self.mode = 'normal'
+ self.docstring(self.text)
+ self.text = ''
+ else:
+ self.mode = 'normal'
+
+ elif self.mode == 'in doc':
+ self.text += ' ' + line
+ if '"""' in line:
+ self.mode = 'normal'
+ self.docstring(self.text.strip().strip('"'))
+ self.text = ''
+
+ elif line.startswith('## '):
+ self.header(line.strip().strip('#'))
+
+ elif line.startswith('def ') or line.startswith('class '):
+ self.text += line.strip().strip(':')
+ if line.strip().endswith(':'):
+ if self.definition(self.text):
+ self.text = ''
+ self.mode = 'in func'
+ else:
+ self.text = ''
+ self.mode = 'normal'
+ else:
+ self.mode = 'in def'
+
+ def clean(self, text):
+ text = text.strip()
+ text = text.replace('*', r'\*')
+ return text
+
+ def definition(self, text):
+ text = web.lstrips(text, 'def ')
+ if text.startswith('_') or text.startswith('class _'):
+ return False
+ print '`'+text.strip()+'`'
+ return True
+
+ def docstring(self, text):
+ print ' :', text.strip()
+ print
+
+ def header(self, text):
+ print '##', text.strip()
+ print
+
+for pyfile in os.listdir('trunk/web'):
+ if pyfile[-2:] == 'py':
+ print
+ print '## ' + pyfile
+ print
+ Parser().go('trunk/web/' + pyfile)
+print '`ctx`\n :',
+print '\n'.join(' '+x for x in web.ctx.__doc__.strip().split('\n'))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/tools/makedoc.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,170 @@
+"""
+Outputs web.py docs as html
+version 2.0: documents all code, and indents nicely.
+By Colin Rothwell (TheBoff)
+"""
+import sys
+import inspect
+import markdown
+sys.path.insert(0, '..')
+
+modules = [
+ 'web.application',
+ 'web.contrib.template',
+ 'web.db',
+ 'web.debugerror',
+ 'web.form',
+ 'web.http',
+ 'web.httpserver',
+ 'web.net',
+ 'web.session',
+ 'web.template',
+ 'web.utils',
+ 'web.webapi',
+ 'web.webopenid',
+ 'web.wsgi'
+]
+
+item_start = '<code class="%s">'
+item_end = '</code>'
+
+indent_amount = 30
+
+doc_these = ( #These are the types of object that should be docced
+ 'module',
+ 'classobj',
+ 'instancemethod',
+ 'function',
+ 'type',
+ 'property',
+)
+
+not_these_names = ( #Any particular object names that shouldn't be doced
+ 'fget',
+ 'fset',
+ 'fdel',
+ 'storage', #These stop the lower case versions getting docced
+ 'memoize',
+ 'iterbetter',
+ 'capturesstdout',
+ 'profile',
+ 'threadeddict',
+    'd', #Don't know what this is, but can only conclude it shouldn't be doc'd
+)
+
+css = '''
+<style type="text/css">
+.module {
+ font-size: 130%;
+ font-weight: bold;
+}
+
+.function, .class, .type {
+ font-size: 120%;
+ font-weight: bold;
+}
+
+.method, .property {
+ font-size: 115%;
+ font-weight: bold;
+}
+
+.ts {
+ font-size: small;
+ font-weight: lighter;
+ color: grey;
+}
+
+#contents_link {
+ position: fixed;
+ top: 0;
+ right: 0;
+ padding: 5px;
+ background: rgba(255, 255, 255, 0.5);
+}
+
+#contents_link a:hover {
+ font-weight: bold;
+}
+</style>
+'''
+
+
+indent_start = '<div style="margin-left:%dpx">'
+indent_end = '</div>'
+
+header = '''
+<div id="contents_link">
+<a href="#top">Back to contents</a>
+</div>
+'''
+
+def type_string(ob):
+ return str(type(ob)).split("'")[1]
+
+def ts_css(text):
+ """applies nice css to the type string"""
+ return '<span class="ts">%s</span>' % text
+
+def arg_string(func):
+ """Returns a nice argstring for a function or method"""
+ return inspect.formatargspec(*inspect.getargspec(func))
+
+def recurse_over(ob, name, indent_level=0):
+ ts = type_string(ob)
+    if not ts in doc_these: return #stops what shouldn't be docced from getting docced
+ if indent_level > 0 and ts == 'module': return #Stops it getting into the stdlib
+ if name in not_these_names: return #Stops things we don't want getting docced
+
+ indent = indent_level * indent_amount #Indents nicely
+ ds_indent = indent + (indent_amount / 2)
+ if indent_level > 0: print indent_start % indent
+
+ argstr = ''
+ if ts.endswith(('function', 'method')):
+ argstr = arg_string(ob)
+ elif ts == 'classobj' or ts == 'type':
+ if ts == 'classobj': ts = 'class'
+ if hasattr(ob, '__init__'):
+ if type_string(ob.__init__) == 'instancemethod':
+ argstr = arg_string(ob.__init__)
+ else:
+ argstr = '(self)'
+ if ts == 'instancemethod': ts = 'method' #looks much nicer
+
+ ds = inspect.getdoc(ob)
+ if ds is None: ds = ''
+ ds = markdown.Markdown(ds)
+
+ mlink = '<a name="%s">' % name if ts == 'module' else ''
+ mend = '</a>' if ts == 'module' else ''
+
+ print ''.join(('<p>', ts_css(ts), item_start % ts, ' ', mlink, name, argstr,
+ mend, item_end, '<br />'))
+ print ''.join((indent_start % ds_indent, ds, indent_end, '</p>'))
+    #Although ''.join looks weird, it's a lot faster than string addition
+ members = ''
+
+ if hasattr(ob, '__all__'): members = ob.__all__
+ else: members = [item for item in dir(ob) if not item.startswith('_')]
+
+ if not 'im_class' in members:
+ for name in members:
+ recurse_over(getattr(ob, name), name, indent_level + 1)
+ if indent_level > 0: print indent_end
+
+def main():
+ print '<div>' #Stops markdown vandalising my html.
+ print css
+ print header
+ print '<ul>'
+ for name in modules:
+ print '<li><a href="#%(name)s">%(name)s</a></li>' % dict(name=name)
+ print '</ul>'
+ for name in modules:
+ mod = __import__(name, {}, {}, 'x')
+ recurse_over(mod, name)
+ print '</div>'
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/tools/markdown.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,677 @@
+#!/usr/bin/python
+import re, md5, sys, string
+
+"""markdown.py: A Markdown-styled-text to HTML converter in Python.
+
+Usage:
+ ./markdown.py textfile.markdown
+
+Calling:
+ import markdown
+ somehtml = markdown.markdown(sometext)
+
+For other versions of markdown, see:
+ http://www.freewisdom.org/projects/python-markdown/
+ http://en.wikipedia.org/wiki/Markdown
+"""
+
+__version__ = '1.0.1-2' # port of 1.0.1
+__license__ = "GNU GPL 2"
+__author__ = [
+ 'John Gruber <http://daringfireball.net/>',
+ 'Tollef Fog Heen <tfheen@err.no>',
+ 'Aaron Swartz <me@aaronsw.com>'
+]
+
+def htmlquote(text):
+    """Encodes `text` for raw use in HTML."""
+    text = text.replace("&", "&amp;") # Must be done first!
+    text = text.replace("<", "&lt;")
+    text = text.replace(">", "&gt;")
+    text = text.replace("'", "&#39;")
+    text = text.replace('"', "&quot;")
+    return text
+
+def semirandom(seed):
+ x = 0
+ for c in md5.new(seed).digest(): x += ord(c)
+ return x / (255*16.)
+
+class _Markdown:
+ emptyelt = " />"
+ tabwidth = 4
+
+ escapechars = '\\`*_{}[]()>#+-.!'
+ escapetable = {}
+ for char in escapechars:
+ escapetable[char] = md5.new(char).hexdigest()
+
+ r_multiline = re.compile("\n{2,}")
+ r_stripspace = re.compile(r"^[ \t]+$", re.MULTILINE)
+ def parse(self, text):
+ self.urls = {}
+ self.titles = {}
+ self.html_blocks = {}
+ self.list_level = 0
+
+ text = text.replace("\r\n", "\n")
+ text = text.replace("\r", "\n")
+ text += "\n\n"
+ text = self._Detab(text)
+ text = self.r_stripspace.sub("", text)
+ text = self._HashHTMLBlocks(text)
+ text = self._StripLinkDefinitions(text)
+ text = self._RunBlockGamut(text)
+ text = self._UnescapeSpecialChars(text)
+ return text
+
+ r_StripLinkDefinitions = re.compile(r"""
+ ^[ ]{0,%d}\[(.+)\]: # id = $1
+ [ \t]*\n?[ \t]*
+ <?(\S+?)>? # url = $2
+ [ \t]*\n?[ \t]*
+ (?:
+ (?<=\s) # lookbehind for whitespace
+ [\"\(] # " is backlashed so it colorizes our code right
+ (.+?) # title = $3
+ [\"\)]
+ [ \t]*
+ )? # title is optional
+ (?:\n+|\Z)
+ """ % (tabwidth-1), re.MULTILINE|re.VERBOSE)
+ def _StripLinkDefinitions(self, text):
+ def replacefunc(matchobj):
+ (t1, t2, t3) = matchobj.groups()
+ #@@ case sensitivity?
+ self.urls[t1.lower()] = self._EncodeAmpsAndAngles(t2)
+ if t3 is not None:
+                self.titles[t1.lower()] = t3.replace('"', '&quot;')
+ return ""
+
+ text = self.r_StripLinkDefinitions.sub(replacefunc, text)
+ return text
+
+ blocktagsb = r"p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|math"
+ blocktagsa = blocktagsb + "|ins|del"
+
+ r_HashHTMLBlocks1 = re.compile(r"""
+ ( # save in $1
+ ^ # start of line (with /m)
+ <(%s) # start tag = $2
+ \b # word break
+ (.*\n)*? # any number of lines, minimally matching
+ </\2> # the matching end tag
+ [ \t]* # trailing spaces/tabs
+ (?=\n+|$) # followed by a newline or end of document
+ )
+ """ % blocktagsa, re.MULTILINE | re.VERBOSE)
+
+ r_HashHTMLBlocks2 = re.compile(r"""
+ ( # save in $1
+ ^ # start of line (with /m)
+ <(%s) # start tag = $2
+ \b # word break
+ (.*\n)*? # any number of lines, minimally matching
+ .*</\2> # the matching end tag
+ [ \t]* # trailing spaces/tabs
+ (?=\n+|\Z) # followed by a newline or end of document
+ )
+ """ % blocktagsb, re.MULTILINE | re.VERBOSE)
+
+ r_HashHR = re.compile(r"""
+ (?:
+ (?<=\n\n) # Starting after a blank line
+ | # or
+ \A\n? # the beginning of the doc
+ )
+ ( # save in $1
+ [ ]{0,%d}
+ <(hr) # start tag = $2
+ \b # word break
+ ([^<>])*? #
+ /?> # the matching end tag
+ [ \t]*
+ (?=\n{2,}|\Z)# followed by a blank line or end of document
+ )
+ """ % (tabwidth-1), re.VERBOSE)
+ r_HashComment = re.compile(r"""
+ (?:
+ (?<=\n\n) # Starting after a blank line
+ | # or
+ \A\n? # the beginning of the doc
+ )
+ ( # save in $1
+ [ ]{0,%d}
+ (?:
+ <!
+ (--.*?--\s*)+
+ >
+ )
+ [ \t]*
+ (?=\n{2,}|\Z)# followed by a blank line or end of document
+ )
+ """ % (tabwidth-1), re.VERBOSE)
+
+ def _HashHTMLBlocks(self, text):
+ def handler(m):
+ key = md5.new(m.group(1)).hexdigest()
+ self.html_blocks[key] = m.group(1)
+ return "\n\n%s\n\n" % key
+
+ text = self.r_HashHTMLBlocks1.sub(handler, text)
+ text = self.r_HashHTMLBlocks2.sub(handler, text)
+ oldtext = text
+ text = self.r_HashHR.sub(handler, text)
+ text = self.r_HashComment.sub(handler, text)
+ return text
+
+ #@@@ wrong!
+ r_hr1 = re.compile(r'^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$', re.M)
+ r_hr2 = re.compile(r'^[ ]{0,2}([ ]?-[ ]?){3,}[ \t]*$', re.M)
+ r_hr3 = re.compile(r'^[ ]{0,2}([ ]?_[ ]?){3,}[ \t]*$', re.M)
+
+ def _RunBlockGamut(self, text):
+ text = self._DoHeaders(text)
+ for x in [self.r_hr1, self.r_hr2, self.r_hr3]:
+ text = x.sub("\n<hr%s\n" % self.emptyelt, text);
+ text = self._DoLists(text)
+ text = self._DoCodeBlocks(text)
+ text = self._DoBlockQuotes(text)
+
+ # We did this in parse()
+ # to escape the source
+ # now it's stuff _we_ made
+ # so we don't wrap it in <p>s.
+ text = self._HashHTMLBlocks(text)
+ text = self._FormParagraphs(text)
+ return text
+
+ r_NewLine = re.compile(" {2,}\n")
+ def _RunSpanGamut(self, text):
+ text = self._DoCodeSpans(text)
+ text = self._EscapeSpecialChars(text)
+ text = self._DoImages(text)
+ text = self._DoAnchors(text)
+ text = self._DoAutoLinks(text)
+ text = self._EncodeAmpsAndAngles(text)
+ text = self._DoItalicsAndBold(text)
+ text = self.r_NewLine.sub(" <br%s\n" % self.emptyelt, text)
+ return text
+
+ def _EscapeSpecialChars(self, text):
+ tokens = self._TokenizeHTML(text)
+ text = ""
+ for cur_token in tokens:
+ if cur_token[0] == "tag":
+ cur_token[1] = cur_token[1].replace('*', self.escapetable["*"])
+ cur_token[1] = cur_token[1].replace('_', self.escapetable["_"])
+ text += cur_token[1]
+ else:
+ text += self._EncodeBackslashEscapes(cur_token[1])
+ return text
+
+ r_DoAnchors1 = re.compile(
+ r""" ( # wrap whole match in $1
+ \[
+ (.*?) # link text = $2
+ # [for bracket nesting, see below]
+ \]
+
+ [ ]? # one optional space
+ (?:\n[ ]*)? # one optional newline followed by spaces
+
+ \[
+ (.*?) # id = $3
+ \]
+ )
+ """, re.S|re.VERBOSE)
+ r_DoAnchors2 = re.compile(
+ r""" ( # wrap whole match in $1
+ \[
+ (.*?) # link text = $2
+ \]
+ \( # literal paren
+ [ \t]*
+ <?(.+?)>? # href = $3
+ [ \t]*
+ ( # $4
+ ([\'\"]) # quote char = $5
+ (.*?) # Title = $6
+ \5 # matching quote
+ )? # title is optional
+ \)
+ )
+ """, re.S|re.VERBOSE)
+ def _DoAnchors(self, text):
+ # We here don't do the same as the perl version, as python's regex
+ # engine gives us no way to match brackets.
+
+ def handler1(m):
+ whole_match = m.group(1)
+ link_text = m.group(2)
+ link_id = m.group(3).lower()
+ if not link_id: link_id = link_text.lower()
+ title = self.titles.get(link_id, None)
+
+
+ if self.urls.has_key(link_id):
+ url = self.urls[link_id]
+ url = url.replace("*", self.escapetable["*"])
+ url = url.replace("_", self.escapetable["_"])
+ res = '<a href="%s"' % htmlquote(url)
+
+ if title:
+ title = title.replace("*", self.escapetable["*"])
+ title = title.replace("_", self.escapetable["_"])
+ res += ' title="%s"' % htmlquote(title)
+ res += ">%s</a>" % htmlquote(link_text)
+ else:
+ res = whole_match
+ return res
+
+ def handler2(m):
+ whole_match = m.group(1)
+ link_text = m.group(2)
+ url = m.group(3)
+ title = m.group(6)
+
+ url = url.replace("*", self.escapetable["*"])
+ url = url.replace("_", self.escapetable["_"])
+ res = '''<a href="%s"''' % htmlquote(url)
+
+ if title:
+                title = title.replace('"', '&quot;')
+ title = title.replace("*", self.escapetable["*"])
+ title = title.replace("_", self.escapetable["_"])
+ res += ' title="%s"' % htmlquote(title)
+ res += ">%s</a>" % htmlquote(link_text)
+ return res
+
+ text = self.r_DoAnchors1.sub(handler1, text)
+ text = self.r_DoAnchors2.sub(handler2, text)
+ return text
+
+ r_DoImages1 = re.compile(
+ r""" ( # wrap whole match in $1
+ !\[
+ (.*?) # alt text = $2
+ \]
+
+ [ ]? # one optional space
+ (?:\n[ ]*)? # one optional newline followed by spaces
+
+ \[
+ (.*?) # id = $3
+ \]
+
+ )
+ """, re.VERBOSE|re.S)
+
+ r_DoImages2 = re.compile(
+ r""" ( # wrap whole match in $1
+ !\[
+ (.*?) # alt text = $2
+ \]
+ \( # literal paren
+ [ \t]*
+ <?(\S+?)>? # src url = $3
+ [ \t]*
+ ( # $4
+ ([\'\"]) # quote char = $5
+ (.*?) # title = $6
+ \5 # matching quote
+ [ \t]*
+ )? # title is optional
+ \)
+ )
+ """, re.VERBOSE|re.S)
+
+ def _DoImages(self, text):
+ def handler1(m):
+ whole_match = m.group(1)
+ alt_text = m.group(2)
+ link_id = m.group(3).lower()
+
+ if not link_id:
+ link_id = alt_text.lower()
+
+            alt_text = alt_text.replace('"', '&quot;')
+ if self.urls.has_key(link_id):
+ url = self.urls[link_id]
+ url = url.replace("*", self.escapetable["*"])
+ url = url.replace("_", self.escapetable["_"])
+ res = '''<img src="%s" alt="%s"''' % (htmlquote(url), htmlquote(alt_text))
+ if self.titles.has_key(link_id):
+ title = self.titles[link_id]
+ title = title.replace("*", self.escapetable["*"])
+ title = title.replace("_", self.escapetable["_"])
+ res += ' title="%s"' % htmlquote(title)
+ res += self.emptyelt
+ else:
+ res = whole_match
+ return res
+
+ def handler2(m):
+ whole_match = m.group(1)
+ alt_text = m.group(2)
+ url = m.group(3)
+ title = m.group(6) or ''
+
+            alt_text = alt_text.replace('"', '&quot;')
+            title = title.replace('"', '&quot;')
+ url = url.replace("*", self.escapetable["*"])
+ url = url.replace("_", self.escapetable["_"])
+ res = '<img src="%s" alt="%s"' % (htmlquote(url), htmlquote(alt_text))
+ if title is not None:
+ title = title.replace("*", self.escapetable["*"])
+ title = title.replace("_", self.escapetable["_"])
+ res += ' title="%s"' % htmlquote(title)
+ res += self.emptyelt
+ return res
+
+ text = self.r_DoImages1.sub(handler1, text)
+ text = self.r_DoImages2.sub(handler2, text)
+ return text
+
+ r_DoHeaders = re.compile(r"^(\#{1,6})[ \t]*(.+?)[ \t]*\#*\n+", re.VERBOSE|re.M)
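+ # _DoHeaders handles both setext-style headers (a line underlined with '='
+ # for <h1> or '-' for <h2>, handled by findheader below) and ATX-style '#'
+ # headers (handled by r_DoHeaders above).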
+ def _DoHeaders(self, text):
+ def findheader(text, c, n):
+ textl = text.split('\n')
+ for i in xrange(len(textl)):
+ if i >= len(textl): continue
+ count = textl[i].strip().count(c)
+ if count > 0 and count == len(textl[i].strip()) and textl[i+1].strip() == '' and textl[i-1].strip() != '':
+ textl = textl[:i] + textl[i+1:]
+ textl[i-1] = '<h'+n+'>'+self._RunSpanGamut(textl[i-1])+'</h'+n+'>'
+ textl = textl[:i] + textl[i+1:]
+ text = '\n'.join(textl)
+ return text
+
+ def handler(m):
+ level = len(m.group(1))
+ header = self._RunSpanGamut(m.group(2))
+ return "<h%s>%s</h%s>\n\n" % (level, header, level)
+
+ text = findheader(text, '=', '1')
+ text = findheader(text, '-', '2')
+ text = self.r_DoHeaders.sub(handler, text)
+ return text
+
+ rt_l = r"""
+ (
+ (
+ [ ]{0,%d}
+ ([*+-]|\d+[.])
+ [ \t]+
+ )
+ (?:.+?)
+ (
+ \Z
+ |
+ \n{2,}
+ (?=\S)
+ (?![ \t]* ([*+-]|\d+[.])[ \t]+)
+ )
+ )
+ """ % (tabwidth - 1)
+ r_DoLists = re.compile('^'+rt_l, re.M | re.VERBOSE | re.S)
+ r_DoListsTop = re.compile(
+ r'(?:\A\n?|(?<=\n\n))'+rt_l, re.M | re.VERBOSE | re.S)
+
+ def _DoLists(self, text):
+ def handler(m):
+ list_type = "ol"
+ if m.group(3) in [ "*", "-", "+" ]:
+ list_type = "ul"
+ listn = m.group(1)
+ listn = self.r_multiline.sub("\n\n\n", listn)
+ res = self._ProcessListItems(listn)
+ res = "<%s>\n%s</%s>\n" % (list_type, res, list_type)
+ return res
+
+ if self.list_level:
+ text = self.r_DoLists.sub(handler, text)
+ else:
+ text = self.r_DoListsTop.sub(handler, text)
+ return text
+
+ r_multiend = re.compile(r"\n{2,}\Z")
+ r_ProcessListItems = re.compile(r"""
+ (\n)? # leading line = $1
+ (^[ \t]*) # leading whitespace = $2
+ ([*+-]|\d+[.]) [ \t]+ # list marker = $3
+ ((?:.+?) # list item text = $4
+ (\n{1,2}))
+ (?= \n* (\Z | \2 ([*+-]|\d+[.]) [ \t]+))
+ """, re.VERBOSE | re.M | re.S)
+
+ def _ProcessListItems(self, text):
+ self.list_level += 1
+ text = self.r_multiend.sub("\n", text)
+
+ def handler(m):
+ item = m.group(4)
+ leading_line = m.group(1)
+ leading_space = m.group(2)
+
+ if leading_line or self.r_multiline.search(item):
+ item = self._RunBlockGamut(self._Outdent(item))
+ else:
+ item = self._DoLists(self._Outdent(item))
+ if item[-1] == "\n": item = item[:-1] # chomp
+ item = self._RunSpanGamut(item)
+ return "<li>%s</li>\n" % item
+
+ text = self.r_ProcessListItems.sub(handler, text)
+ self.list_level -= 1
+ return text
+
+ r_DoCodeBlocks = re.compile(r"""
+ (?:\n\n|\A)
+ ( # $1 = the code block
+ (?:
+ (?:[ ]{%d} | \t) # Lines must start with a tab or equiv
+ .*\n+
+ )+
+ )
+ ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space/end of doc
+ """ % (tabwidth, tabwidth), re.M | re.VERBOSE)
+ def _DoCodeBlocks(self, text):
+ def handler(m):
+ codeblock = m.group(1)
+ codeblock = self._EncodeCode(self._Outdent(codeblock))
+ codeblock = self._Detab(codeblock)
+ codeblock = codeblock.lstrip("\n")
+ codeblock = codeblock.rstrip()
+ res = "\n\n<pre><code>%s\n</code></pre>\n\n" % codeblock
+ return res
+
+ text = self.r_DoCodeBlocks.sub(handler, text)
+ return text
+ r_DoCodeSpans = re.compile(r"""
+ (`+) # $1 = Opening run of `
+ (.+?) # $2 = The code block
+ (?<!`)
+ \1 # Matching closer
+ (?!`)
+ """, re.I|re.VERBOSE)
+ def _DoCodeSpans(self, text):
+ def handler(m):
+ c = m.group(2)
+ c = c.strip()
+ c = self._EncodeCode(c)
+ return "<code>%s</code>" % c
+
+ text = self.r_DoCodeSpans.sub(handler, text)
+ return text
+
+ def _EncodeCode(self, text):
+ text = text.replace("&","&amp;")
+ text = text.replace("<","&lt;")
+ text = text.replace(">","&gt;")
+ for c in "*_{}[]\\":
+ text = text.replace(c, self.escapetable[c])
+ return text
+
+
+ r_DoBold = re.compile(r"(\*\*|__) (?=\S) (.+?[*_]*) (?<=\S) \1", re.VERBOSE | re.S)
+ r_DoItalics = re.compile(r"(\*|_) (?=\S) (.+?) (?<=\S) \1", re.VERBOSE | re.S)
+ def _DoItalicsAndBold(self, text):
+ text = self.r_DoBold.sub(r"<strong>\2</strong>", text)
+ text = self.r_DoItalics.sub(r"<em>\2</em>", text)
+ return text
+
+ r_start = re.compile(r"^", re.M)
+ r_DoBlockQuotes1 = re.compile(r"^[ \t]*>[ \t]?", re.M)
+ r_DoBlockQuotes2 = re.compile(r"^[ \t]+$", re.M)
+ r_DoBlockQuotes3 = re.compile(r"""
+ ( # Wrap whole match in $1
+ (
+ ^[ \t]*>[ \t]? # '>' at the start of a line
+ .+\n # rest of the first line
+ (.+\n)* # subsequent consecutive lines
+ \n* # blanks
+ )+
+ )""", re.M | re.VERBOSE)
+ r_protectpre = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
+ r_propre = re.compile(r'^ ', re.M)
+
+ def _DoBlockQuotes(self, text):
+ def prehandler(m):
+ return self.r_propre.sub('', m.group(1))
+
+ def handler(m):
+ bq = m.group(1)
+ bq = self.r_DoBlockQuotes1.sub("", bq)
+ bq = self.r_DoBlockQuotes2.sub("", bq)
+ bq = self._RunBlockGamut(bq)
+ bq = self.r_start.sub(" ", bq)
+ bq = self.r_protectpre.sub(prehandler, bq)
+ return "<blockquote>\n%s\n</blockquote>\n\n" % bq
+
+ text = self.r_DoBlockQuotes3.sub(handler, text)
+ return text
+
+ r_tabbed = re.compile(r"^([ \t]*)")
+ def _FormParagraphs(self, text):
+ text = text.strip("\n")
+ grafs = self.r_multiline.split(text)
+
+ for g in xrange(len(grafs)):
+ t = grafs[g].strip() #@@?
+ if not self.html_blocks.has_key(t):
+ t = self._RunSpanGamut(t)
+ t = self.r_tabbed.sub(r"<p>", t)
+ t += "</p>"
+ grafs[g] = t
+
+ for g in xrange(len(grafs)):
+ t = grafs[g].strip()
+ if self.html_blocks.has_key(t):
+ grafs[g] = self.html_blocks[t]
+
+ return "\n\n".join(grafs)
+
+ r_EncodeAmps = re.compile(r"&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)")
+ r_EncodeAngles = re.compile(r"<(?![a-z/?\$!])")
+ def _EncodeAmpsAndAngles(self, text):
+ text = self.r_EncodeAmps.sub("&amp;", text)
+ text = self.r_EncodeAngles.sub("&lt;", text)
+ return text
+
+ def _EncodeBackslashEscapes(self, text):
+ for char in self.escapechars:
+ text = text.replace("\\" + char, self.escapetable[char])
+ return text
+
+ r_link = re.compile(r"<((https?|ftp):[^\'\">\s]+)>", re.I)
+ r_email = re.compile(r"""
+ <
+ (?:mailto:)?
+ (
+ [-.\w]+
+ \@
+ [-a-z0-9]+(\.[-a-z0-9]+)*\.[a-z]+
+ )
+ >""", re.VERBOSE|re.I)
+ def _DoAutoLinks(self, text):
+ text = self.r_link.sub(r'<a href="\1">\1</a>', text)
+
+ def handler(m):
+ l = m.group(1)
+ return self._EncodeEmailAddress(self._UnescapeSpecialChars(l))
+
+ text = self.r_email.sub(handler, text)
+ return text
+
+ r_EncodeEmailAddress = re.compile(r">.+?:")
+ def _EncodeEmailAddress(self, text):
+ encode = [
+ lambda x: "&#%s;" % ord(x),
+ lambda x: "&#x%X;" % ord(x),
+ lambda x: x
+ ]
+
+ text = "mailto:" + text
+ addr = ""
+ for c in text:
+ if c == ':': addr += c; continue
+
+ r = semirandom(addr)
+ if r < 0.45:
+ addr += encode[1](c)
+ elif r > 0.9 and c != '@':
+ addr += encode[2](c)
+ else:
+ addr += encode[0](c)
+
+ text = '<a href="%s">%s</a>' % (addr, addr)
+ text = self.r_EncodeEmailAddress.sub('>', text)
+ return text
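+
+ # Sketch of the result: every character of "mailto:<address>" except the
+ # colon is emitted as a decimal (&#NNN;) or hex (&#xNN;) character
+ # reference, or occasionally left as-is; the r_EncodeEmailAddress
+ # substitution then strips the encoded "mailto:" prefix from the visible
+ # link text while keeping it in the href.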
+
+ def _UnescapeSpecialChars(self, text):
+ for key in self.escapetable.keys():
+ text = text.replace(self.escapetable[key], key)
+ return text
+
+ tokenize_depth = 6
+ tokenize_nested_tags = '|'.join([r'(?:<[a-z/!$](?:[^<>]'] * tokenize_depth) + (')*>)' * tokenize_depth)
+ r_TokenizeHTML = re.compile(
+ r"""(?: <! ( -- .*? -- \s* )+ > ) | # comment
+ (?: <\? .*? \?> ) | # processing instruction
+ %s # nested tags
+ """ % tokenize_nested_tags, re.I|re.VERBOSE)
+ def _TokenizeHTML(self, text):
+ pos = 0
+ tokens = []
+ matchobj = self.r_TokenizeHTML.search(text, pos)
+ while matchobj:
+ whole_tag = matchobj.string[matchobj.start():matchobj.end()]
+ sec_start = matchobj.end()
+ tag_start = sec_start - len(whole_tag)
+ if pos < tag_start:
+ tokens.append(["text", matchobj.string[pos:tag_start]])
+
+ tokens.append(["tag", whole_tag])
+ pos = sec_start
+ matchobj = self.r_TokenizeHTML.search(text, pos)
+
+ if pos < len(text):
+ tokens.append(["text", text[pos:]])
+ return tokens
+
+ r_Outdent = re.compile(r"""^(\t|[ ]{1,%d})""" % tabwidth, re.M)
+ def _Outdent(self, text):
+ text = self.r_Outdent.sub("", text)
+ return text
+
+ def _Detab(self, text): return text.expandtabs(self.tabwidth)
+
+def Markdown(*args, **kw): return _Markdown().parse(*args, **kw)
+markdown = Markdown
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ print Markdown(open(sys.argv[1]).read())
+ else:
+ print Markdown(sys.stdin.read())
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/__init__.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""web.py: makes web apps (http://webpy.org)"""
+
+from __future__ import generators
+
+__version__ = "0.32"
+__author__ = [
+ "Aaron Swartz <me@aaronsw.com>",
+ "Anand Chitipothu <anandology@gmail.com>"
+]
+__license__ = "public domain"
+__contributors__ = "see http://webpy.org/changes"
+
+import utils, db, net, wsgi, http, webapi, httpserver, debugerror
+import template, form
+
+import session
+
+from utils import *
+from db import *
+from net import *
+from wsgi import *
+from http import *
+from webapi import *
+from httpserver import *
+from debugerror import *
+from application import *
+from browser import *
+import test
+try:
+ import webopenid as openid
+except ImportError:
+ pass # requires openid module
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/application.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+"""
+Web application
+(from web.py)
+"""
+import webapi as web
+import webapi, wsgi, utils
+import debugerror
+from utils import lstrips, safeunicode
+import sys
+
+import urllib
+import traceback
+import itertools
+import os
+import re
+import types
+from exceptions import SystemExit
+
+try:
+ import wsgiref.handlers
+except ImportError:
+ pass # don't break people with old Pythons
+
+__all__ = [
+ "application", "auto_application",
+ "subdir_application", "subdomain_application",
+ "loadhook", "unloadhook",
+ "autodelegate"
+]
+
+class application:
+ """
+ Application to delegate requests based on path.
+
+ >>> urls = ("/hello", "hello")
+ >>> app = application(urls, globals())
+ >>> class hello:
+ ... def GET(self): return "hello"
+ >>>
+ >>> app.request("/hello").data
+ 'hello'
+ """
+ def __init__(self, mapping=(), fvars={}, autoreload=None):
+ if autoreload is None:
+ autoreload = web.config.get('debug', False)
+ self.mapping = mapping
+ self.fvars = fvars
+ self.processors = []
+
+ self.add_processor(loadhook(self._load))
+ self.add_processor(unloadhook(self._unload))
+
+ if autoreload:
+ def main_module_name():
+ mod = sys.modules['__main__']
+ file = getattr(mod, '__file__', None) # make sure this works even from python interpreter
+ return file and os.path.splitext(os.path.basename(file))[0]
+
+ def modname(fvars):
+ """find the name of the module from fvars."""
+ file, name = fvars.get('__file__'), fvars.get('__name__')
+ if file is None or name is None:
+ return None
+
+ if name == '__main__':
+ # Since the __main__ module can't be reloaded, the module has
+ # to be imported using its file name.
+ name = main_module_name()
+ return name
+
+ mapping_name = utils.dictfind(fvars, mapping)
+ module_name = modname(fvars)
+
+ def reload_mapping():
+ """loadhook to reload mapping and fvars."""
+ mod = __import__(module_name)
+ mapping = getattr(mod, mapping_name, None)
+ if mapping:
+ self.fvars = mod.__dict__
+ self.mapping = mapping
+
+ self.add_processor(loadhook(Reloader()))
+ if mapping_name and module_name:
+ self.add_processor(loadhook(reload_mapping))
+
+ # load the __main__ module using its filename, so that it can be reloaded.
+ if main_module_name() and '__main__' in sys.argv:
+ try:
+ __import__(main_module_name())
+ except ImportError:
+ pass
+
+ def _load(self):
+ web.ctx.app_stack.append(self)
+
+ def _unload(self):
+ web.ctx.app_stack = web.ctx.app_stack[:-1]
+
+ if web.ctx.app_stack:
+ # this is a sub-application, revert ctx to earlier state.
+ oldctx = web.ctx.get('_oldctx')
+ if oldctx:
+ web.ctx.home = oldctx.home
+ web.ctx.homepath = oldctx.homepath
+ web.ctx.path = oldctx.path
+ web.ctx.fullpath = oldctx.fullpath
+
+ def _cleanup(self):
+ #@@@
+ # Since the CherryPy webserver uses a thread pool, the thread-local state is never cleared.
+ # This interferes with other requests.
+ # Clear the thread-local storage to avoid that.
+ # See utils.ThreadedDict for details.
+ import threading
+ t = threading.currentThread()
+ if hasattr(t, '_d'):
+ del t._d
+
+ def add_mapping(self, pattern, classname):
+ self.mapping += (pattern, classname)
+
+ def add_processor(self, processor):
+ """
+ Adds a processor to the application.
+
+ >>> urls = ("/(.*)", "echo")
+ >>> app = application(urls, globals())
+ >>> class echo:
+ ... def GET(self, name): return name
+ ...
+ >>>
+ >>> def hello(handler): return "hello, " + handler()
+ ...
+ >>> app.add_processor(hello)
+ >>> app.request("/web.py").data
+ 'hello, web.py'
+ """
+ self.processors.append(processor)
+
+ def request(self, localpart='/', method='GET', data=None,
+ host="0.0.0.0:8080", headers=None, https=False, **kw):
+ """Makes request to this application for the specified path and method.
+ Response will be a storage object with data, status and headers.
+
+ >>> urls = ("/hello", "hello")
+ >>> app = application(urls, globals())
+ >>> class hello:
+ ... def GET(self):
+ ... web.header('Content-Type', 'text/plain')
+ ... return "hello"
+ ...
+ >>> response = app.request("/hello")
+ >>> response.data
+ 'hello'
+ >>> response.status
+ '200 OK'
+ >>> response.headers['Content-Type']
+ 'text/plain'
+
+ To use https, use https=True.
+
+ >>> urls = ("/redirect", "redirect")
+ >>> app = application(urls, globals())
+ >>> class redirect:
+ ... def GET(self): raise web.seeother("/foo")
+ ...
+ >>> response = app.request("/redirect")
+ >>> response.headers['Location']
+ 'http://0.0.0.0:8080/foo'
+ >>> response = app.request("/redirect", https=True)
+ >>> response.headers['Location']
+ 'https://0.0.0.0:8080/foo'
+
+ The headers argument specifies HTTP headers as a mapping object
+ such as a dict.
+
+ >>> urls = ('/ua', 'uaprinter')
+ >>> class uaprinter:
+ ... def GET(self):
+ ... return 'your user-agent is ' + web.ctx.env['HTTP_USER_AGENT']
+ ...
+ >>> app = application(urls, globals())
+ >>> app.request('/ua', headers = {
+ ... 'User-Agent': 'a small jumping bean/1.0 (compatible)'
+ ... }).data
+ 'your user-agent is a small jumping bean/1.0 (compatible)'
+
+ """
+ path, maybe_query = urllib.splitquery(localpart)
+ query = maybe_query or ""
+
+ if 'env' in kw:
+ env = kw['env']
+ else:
+ env = {}
+ env = dict(env, HTTP_HOST=host, REQUEST_METHOD=method, PATH_INFO=path, QUERY_STRING=query, HTTPS=str(https))
+ headers = headers or {}
+
+ for k, v in headers.items():
+ env['HTTP_' + k.upper().replace('-', '_')] = v
+
+ if 'HTTP_CONTENT_LENGTH' in env:
+ env['CONTENT_LENGTH'] = env.pop('HTTP_CONTENT_LENGTH')
+
+ if 'HTTP_CONTENT_TYPE' in env:
+ env['CONTENT_TYPE'] = env.pop('HTTP_CONTENT_TYPE')
+
+ if method in ["POST", "PUT"]:
+ data = data or ''
+ import StringIO
+ if isinstance(data, dict):
+ q = urllib.urlencode(data)
+ else:
+ q = data
+ env['wsgi.input'] = StringIO.StringIO(q)
+ if not env.get('CONTENT_TYPE', '').lower().startswith('multipart/') and 'CONTENT_LENGTH' not in env:
+ env['CONTENT_LENGTH'] = len(q)
+ response = web.storage()
+ def start_response(status, headers):
+ response.status = status
+ response.headers = dict(headers)
+ response.header_items = headers
+ response.data = "".join(self.wsgifunc()(env, start_response))
+ return response
+
+ def browser(self):
+ import browser
+ return browser.AppBrowser(self)
+
+ def handle(self):
+ fn, args = self._match(self.mapping, web.ctx.path)
+ return self._delegate(fn, self.fvars, args)
+
+ def handle_with_processors(self):
+ def process(processors):
+ try:
+ if processors:
+ p, processors = processors[0], processors[1:]
+ return p(lambda: process(processors))
+ else:
+ return self.handle()
+ except web.HTTPError:
+ raise
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ print >> web.debug, traceback.format_exc()
+ raise self.internalerror()
+
+ # processors must be applied in the reverse order. (??)
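+ # e.g. with processors [p1, p2], the call chain is
+ # p1(lambda: p2(lambda: self.handle())), so the processor added first
+ # becomes the outermost wrapper.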
+ return process(self.processors)
+
+ def wsgifunc(self, *middleware):
+ """Returns a WSGI-compatible function for this application."""
+ def peep(iterator):
+ """Peeps into an iterator by doing an iteration
+ and returns an equivalent iterator.
+ """
+ # wsgi requires the headers first
+ # so we need to do an iteration
+ # and save the result for later
+ try:
+ firstchunk = iterator.next()
+ except StopIteration:
+ firstchunk = ''
+
+ return itertools.chain([firstchunk], iterator)
+
+ def is_generator(x): return x and hasattr(x, 'next')
+
+ def wsgi(env, start_resp):
+ self.load(env)
+ try:
+ # allow uppercase methods only
+ if web.ctx.method.upper() != web.ctx.method:
+ raise web.nomethod()
+
+ result = self.handle_with_processors()
+ if is_generator(result):
+ result = peep(result)
+ else:
+ result = [result]
+ except web.HTTPError, e:
+ result = [e.data]
+
+ result = web.utf8(iter(result))
+
+ status, headers = web.ctx.status, web.ctx.headers
+ start_resp(status, headers)
+
+ def cleanup():
+ self._cleanup()
+ yield '' # force this function to be a generator
+
+ return itertools.chain(result, cleanup())
+
+ for m in middleware:
+ wsgi = m(wsgi)
+
+ return wsgi
+
+ def run(self, *middleware):
+ """
+ Starts handling requests. If called in a CGI or FastCGI context, it will follow
+ that protocol. If called from the command line, it will start an HTTP
+ server on the port named in the first command line argument, or, if there
+ is no argument, on port 8080.
+
+ `middleware` is a list of WSGI middleware which is applied to the resulting WSGI
+ function.
+ """
+ return wsgi.runwsgi(self.wsgifunc(*middleware))
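+
+ # Minimal usage sketch (assumes a module-level `urls` mapping and handler
+ # classes as in the doctests above); the script then serves on port 8080,
+ # or on the port given as the first command-line argument:
+ #
+ #     app = application(urls, globals())
+ #     if __name__ == "__main__":
+ #         app.run()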
+
+ def cgirun(self, *middleware):
+ """
+ Return a CGI handler. This is mostly useful with Google App Engine.
+ There you can just do:
+
+ main = app.cgirun()
+ """
+ wsgiapp = self.wsgifunc(*middleware)
+
+ try:
+ from google.appengine.ext.webapp.util import run_wsgi_app
+ return run_wsgi_app(wsgiapp)
+ except ImportError:
+ # we're not running from within Google App Engine
+ return wsgiref.handlers.CGIHandler().run(wsgiapp)
+
+ def load(self, env):
+ """Initializes ctx using env."""
+ ctx = web.ctx
+ ctx.clear()
+ ctx.status = '200 OK'
+ ctx.headers = []
+ ctx.output = ''
+ ctx.environ = ctx.env = env
+ ctx.host = env.get('HTTP_HOST')
+
+ if env.get('wsgi.url_scheme') in ['http', 'https']:
+ ctx.protocol = env['wsgi.url_scheme']
+ elif env.get('HTTPS', '').lower() in ['on', 'true', '1']:
+ ctx.protocol = 'https'
+ else:
+ ctx.protocol = 'http'
+ ctx.homedomain = ctx.protocol + '://' + env.get('HTTP_HOST', '[unknown]')
+ ctx.homepath = os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', ''))
+ ctx.home = ctx.homedomain + ctx.homepath
+ #@@ home is changed when the request is handled to a sub-application.
+ #@@ but the real home is required for doing absolute redirects.
+ ctx.realhome = ctx.home
+ ctx.ip = env.get('REMOTE_ADDR')
+ ctx.method = env.get('REQUEST_METHOD')
+ ctx.path = env.get('PATH_INFO')
+ # http://trac.lighttpd.net/trac/ticket/406 requires:
+ if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'):
+ ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0], ctx.homepath)
+ # Apache and CherryPy webservers unquote the url but lighttpd doesn't.
+ # unquote explicitly for lighttpd to make ctx.path uniform across all servers.
+ ctx.path = urllib.unquote(ctx.path)
+
+ if env.get('QUERY_STRING'):
+ ctx.query = '?' + env.get('QUERY_STRING', '')
+ else:
+ ctx.query = ''
+
+ ctx.fullpath = ctx.path + ctx.query
+
+ for k, v in ctx.iteritems():
+ if isinstance(v, str):
+ ctx[k] = safeunicode(v)
+
+ # status must always be str
+ ctx.status = '200 OK'
+
+ ctx.app_stack = []
+
+ def _delegate(self, f, fvars, args=[]):
+ def handle_class(cls):
+ meth = web.ctx.method
+ if meth == 'HEAD' and not hasattr(cls, meth):
+ meth = 'GET'
+ if not hasattr(cls, meth):
+ raise web.nomethod(cls)
+ tocall = getattr(cls(), meth)
+ return tocall(*args)
+
+ def is_class(o): return isinstance(o, (types.ClassType, type))
+
+ if f is None:
+ raise web.notfound()
+ elif isinstance(f, application):
+ return f.handle_with_processors()
+ elif is_class(f):
+ return handle_class(f)
+ elif isinstance(f, basestring):
+ if f.startswith('redirect '):
+ url = f.split(' ', 1)[1]
+ if web.ctx.method == "GET":
+ x = web.ctx.env.get('QUERY_STRING', '')
+ if x:
+ url += '?' + x
+ raise web.redirect(url)
+ elif '.' in f:
+ x = f.split('.')
+ mod, cls = '.'.join(x[:-1]), x[-1]
+ mod = __import__(mod, globals(), locals(), [""])
+ cls = getattr(mod, cls)
+ else:
+ cls = fvars[f]
+ return handle_class(cls)
+ elif hasattr(f, '__call__'):
+ return f()
+ else:
+ return web.notfound()
+
+ def _match(self, mapping, value):
+ for pat, what in utils.group(mapping, 2):
+ if isinstance(what, application):
+ if value.startswith(pat):
+ f = lambda: self._delegate_sub_application(pat, what)
+ return f, None
+ else:
+ continue
+ elif isinstance(what, basestring):
+ what, result = utils.re_subm('^' + pat + '$', what, value)
+ else:
+ result = utils.re_compile('^' + pat + '$').match(value)
+
+ if result: # it's a match
+ return what, [x for x in result.groups()]
+ return None, None
+
+ def _delegate_sub_application(self, dir, app):
+ """Delegates the request to sub application `app` rooted at the directory `dir`.
+ The home, homepath, path and fullpath values in web.ctx are updated to mimic a request
+ to the subapp and are restored after it is handled.
+
+ @@Any issues with when used with yield?
+ """
+ web.ctx._oldctx = web.storage(web.ctx)
+ web.ctx.home += dir
+ web.ctx.homepath += dir
+ web.ctx.path = web.ctx.path[len(dir):]
+ web.ctx.fullpath = web.ctx.fullpath[len(dir):]
+ return app.handle_with_processors()
+
+ def get_parent_app(self):
+ if self in web.ctx.app_stack:
+ index = web.ctx.app_stack.index(self)
+ if index > 0:
+ return web.ctx.app_stack[index-1]
+
+ def notfound(self):
+ """Returns HTTPError with '404 not found' message"""
+ parent = self.get_parent_app()
+ if parent:
+ return parent.notfound()
+ else:
+ return web._NotFound()
+
+ def internalerror(self):
+ """Returns HTTPError with '500 internal error' message"""
+ parent = self.get_parent_app()
+ if parent:
+ return parent.internalerror()
+ elif web.config.get('debug'):
+ import debugerror
+ return debugerror.debugerror()
+ else:
+ return web._InternalError()
+
+class auto_application(application):
+ """Application similar to `application` but urls are constructed
+ automatically using a metaclass.
+
+ >>> app = auto_application()
+ >>> class hello(app.page):
+ ... def GET(self): return "hello, world"
+ ...
+ >>> class foo(app.page):
+ ... path = '/foo/.*'
+ ... def GET(self): return "foo"
+ >>> app.request("/hello").data
+ 'hello, world'
+ >>> app.request('/foo/bar').data
+ 'foo'
+ """
+ def __init__(self):
+ application.__init__(self)
+
+ class metapage(type):
+ def __init__(klass, name, bases, attrs):
+ type.__init__(klass, name, bases, attrs)
+ path = attrs.get('path', '/' + name)
+
+ # path can be specified as None to ignore that class
+ # typically required to create an abstract base class.
+ if path is not None:
+ self.add_mapping(path, klass)
+
+ class page:
+ path = None
+ __metaclass__ = metapage
+
+ self.page = page
+
+# The application class already has the required functionality of subdir_application
+subdir_application = application
+
+class subdomain_application(application):
+ """
+ Application to delegate requests based on the host.
+
+ >>> urls = ("/hello", "hello")
+ >>> app = application(urls, globals())
+ >>> class hello:
+ ... def GET(self): return "hello"
+ >>>
+ >>> mapping = (r"hello\.example\.com", app)
+ >>> app2 = subdomain_application(mapping)
+ >>> app2.request("/hello", host="hello.example.com").data
+ 'hello'
+ >>> response = app2.request("/hello", host="something.example.com")
+ >>> response.status
+ '404 Not Found'
+ >>> response.data
+ 'not found'
+ """
+ def handle(self):
+ host = web.ctx.host.split(':')[0] #strip port
+ fn, args = self._match(self.mapping, host)
+ return self._delegate(fn, self.fvars, args)
+
+ def _match(self, mapping, value):
+ for pat, what in utils.group(mapping, 2):
+ if isinstance(what, basestring):
+ what, result = utils.re_subm('^' + pat + '$', what, value)
+ else:
+ result = utils.re_compile('^' + pat + '$').match(value)
+
+ if result: # it's a match
+ return what, [x for x in result.groups()]
+ return None, None
+
+def loadhook(h):
+ """
+ Converts a load hook into an application processor.
+
+ >>> app = auto_application()
+ >>> def f(): "something done before handling request"
+ ...
+ >>> app.add_processor(loadhook(f))
+ """
+ def processor(handler):
+ h()
+ return handler()
+
+ return processor
+
+def unloadhook(h):
+ """
+ Converts an unload hook into an application processor.
+
+ >>> app = auto_application()
+ >>> def f(): "something done after handling request"
+ ...
+ >>> app.add_processor(unloadhook(f))
+ """
+ def processor(handler):
+ try:
+ result = handler()
+ is_generator = result and hasattr(result, 'next')
+ except:
+ # run the hook even when handler raises some exception
+ h()
+ raise
+
+ if is_generator:
+ return wrap(result)
+ else:
+ h()
+ return result
+
+ def wrap(result):
+ def next():
+ try:
+ return result.next()
+ except:
+ # call the hook at the end of the iterator
+ h()
+ raise
+
+ result = iter(result)
+ while True:
+ yield next()
+
+ return processor
+
+def autodelegate(prefix=''):
+ """
+ Returns a method that takes one argument and calls the method named prefix+arg,
+ calling `notfound()` if there isn't one. Example:
+
+ urls = ('/prefs/(.*)', 'prefs')
+
+ class prefs:
+ GET = autodelegate('GET_')
+ def GET_password(self): pass
+ def GET_privacy(self): pass
+
+ `GET_password` would get called for `/prefs/password` while `GET_privacy`
+ gets called for `/prefs/privacy`.
+
+ If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
+ is called.
+ """
+ def internal(self, arg):
+ if '/' in arg:
+ first, rest = arg.split('/', 1)
+ func = prefix + first
+ args = ['/' + rest]
+ else:
+ func = prefix + arg
+ args = []
+
+ if hasattr(self, func):
+ try:
+ return getattr(self, func)(*args)
+ except TypeError:
+ return web.notfound()
+ else:
+ return web.notfound()
+ return internal
+
+class Reloader:
+ """Checks to see if any loaded modules have changed on disk and,
+ if so, reloads them.
+ """
+ def __init__(self):
+ self.mtimes = {}
+
+ def __call__(self):
+ for mod in sys.modules.values():
+ self.check(mod)
+
+ def check(self, mod):
+ try:
+ mtime = os.stat(mod.__file__).st_mtime
+ except (AttributeError, OSError, IOError):
+ return
+ if mod.__file__.endswith('.pyc') and os.path.exists(mod.__file__[:-1]):
+ mtime = max(os.stat(mod.__file__[:-1]).st_mtime, mtime)
+
+ if mod not in self.mtimes:
+ self.mtimes[mod] = mtime
+ elif self.mtimes[mod] < mtime:
+ try:
+ reload(mod)
+ self.mtimes[mod] = mtime
+ except ImportError:
+ pass
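+
+# Usage sketch: application() installs this automatically when autoreload is
+# enabled (web.config.debug); wiring it up by hand looks roughly like:
+#
+#     app = application(urls, globals(), autoreload=False)
+#     app.add_processor(loadhook(Reloader()))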
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/browser.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,236 @@
+"""Browser to test web applications.
+(from web.py)
+"""
+from utils import re_compile
+from net import htmlunquote
+
+import httplib, urllib, urllib2
+import copy
+from StringIO import StringIO
+
+DEBUG = False
+
+__all__ = [
+ "BrowserError",
+ "Browser", "AppBrowser",
+ "AppHandler"
+]
+
+class BrowserError(Exception):
+ pass
+
+class Browser:
+ def __init__(self):
+ import cookielib
+ self.cookiejar = cookielib.CookieJar()
+ self._cookie_processor = urllib2.HTTPCookieProcessor(self.cookiejar)
+ self.form = None
+
+ self.url = "http://0.0.0.0:8080/"
+ self.path = "/"
+
+ self.status = None
+ self.data = None
+ self._response = None
+ self._forms = None
+
+ def reset(self):
+ """Clears all cookies and history."""
+ self.cookiejar.clear()
+
+ def build_opener(self):
+ """Builds the opener using urllib2.build_opener.
+ Subclasses can override this function to provide custom openers.
+ """
+ return urllib2.build_opener()
+
+ def do_request(self, req):
+ if DEBUG:
+ print 'requesting', req.get_method(), req.get_full_url()
+ opener = self.build_opener()
+ opener.add_handler(self._cookie_processor)
+ try:
+ self._response = opener.open(req)
+ except urllib2.HTTPError, e:
+ self._response = e
+
+ self.url = self._response.geturl()
+ self.path = urllib2.Request(self.url).get_selector()
+ self.data = self._response.read()
+ self.status = self._response.code
+ self._forms = None
+ self.form = None
+ return self.get_response()
+
+ def open(self, url, data=None, headers={}):
+ """Opens the specified url."""
+ url = urllib.basejoin(self.url, url)
+ req = urllib2.Request(url, data, headers)
+ return self.do_request(req)
+
+ def show(self):
+ """Opens the current page in a real web browser."""
+ f = open('page.html', 'w')
+ f.write(self.data)
+ f.close()
+
+ import webbrowser, os
+ url = 'file://' + os.path.abspath('page.html')
+ webbrowser.open(url)
+
+ def get_response(self):
+ """Returns a copy of the current response."""
+ return urllib.addinfourl(StringIO(self.data), self._response.info(), self._response.geturl())
+
+ def get_soup(self):
+ """Returns beautiful soup of the current document."""
+ import BeautifulSoup
+ return BeautifulSoup.BeautifulSoup(self.data)
+
+ def get_text(self, e=None):
+ """Returns content of e or the current document as plain text."""
+ e = e or self.get_soup()
+ return ''.join([htmlunquote(c) for c in e.recursiveChildGenerator() if isinstance(c, unicode)])
+
+ def _get_links(self):
+ soup = self.get_soup()
+ return [a for a in soup.findAll(name='a')]
+
+ def get_links(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
+ """Returns all links in the document."""
+ return self._filter_links(self._get_links(),
+ text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
+
+ def follow_link(self, link=None, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
+ if link is None:
+ links = self._filter_links(self.get_links(),
+ text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
+ link = links and links[0]
+
+ if link:
+ return self.open(link['href'])
+ else:
+ raise BrowserError("No link found")
+
+ def find_link(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
+ links = self._filter_links(self.get_links(),
+ text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
+ return links and links[0] or None
+
+ def _filter_links(self, links,
+ text=None, text_regex=None,
+ url=None, url_regex=None,
+ predicate=None):
+ predicates = []
+ if text is not None:
+ predicates.append(lambda link: link.string == text)
+ if text_regex is not None:
+ predicates.append(lambda link: re_compile(text_regex).search(link.string or ''))
+ if url is not None:
+ predicates.append(lambda link: link.get('href') == url)
+ if url_regex is not None:
+ predicates.append(lambda link: re_compile(url_regex).search(link.get('href', '')))
+ if predicate:
+ predicates.append(predicate)
+
+ def f(link):
+ for p in predicates:
+ if not p(link):
+ return False
+ return True
+
+ return [link for link in links if f(link)]
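+
+ # Sketch of the filters above: each keyword argument adds one predicate,
+ # and a link must satisfy all of them, e.g.
+ #     b.get_links(text_regex='Log ?in', url_regex='^/account/')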
+
+ def get_forms(self):
+ """Returns all forms in the current document.
+ The returned form objects implement the ClientForm.HTMLForm interface.
+ """
+ if self._forms is None:
+ import ClientForm
+ self._forms = ClientForm.ParseResponse(self.get_response(), backwards_compat=False)
+ return self._forms
+
+ def select_form(self, name=None, predicate=None, index=0):
+ """Selects the specified form."""
+ forms = self.get_forms()
+
+ if name is not None:
+ forms = [f for f in forms if f.name == name]
+ if predicate:
+ forms = [f for f in forms if predicate(f)]
+
+ if forms:
+ self.form = forms[index]
+ return self.form
+ else:
+ raise BrowserError("No form selected.")
+
+ def submit(self, **kw):
+ """submits the currently selected form."""
+ if self.form is None:
+ raise BrowserError("No form selected.")
+ req = self.form.click(**kw)
+ return self.do_request(req)
+
+ def __getitem__(self, key):
+ return self.form[key]
+
+ def __setitem__(self, key, value):
+ self.form[key] = value
+
+class AppBrowser(Browser):
+ """Browser interface to test web.py apps.
+
+ b = AppBrowser(app)
+ b.open('/')
+ b.follow_link(text='Login')
+
+ b.select_form(name='login')
+ b['username'] = 'joe'
+ b['password'] = 'secret'
+ b.submit()
+
+ assert b.path == '/'
+ assert 'Welcome joe' in b.get_text()
+ """
+ def __init__(self, app):
+ Browser.__init__(self)
+ self.app = app
+
+ def build_opener(self):
+ return urllib2.build_opener(AppHandler(self.app))
+
+class AppHandler(urllib2.HTTPHandler):
+ """urllib2 handler to handle requests using web.py application."""
+ handler_order = 100
+
+ def __init__(self, app):
+ self.app = app
+
+ def http_open(self, req):
+ result = self.app.request(
+ localpart=req.get_selector(),
+ method=req.get_method(),
+ host=req.get_host(),
+ data=req.get_data(),
+ headers=dict(req.header_items()),
+ https=req.get_type() == "https"
+ )
+ return self._make_response(result, req.get_full_url())
+
+ def https_open(self, req):
+ return self.http_open(req)
+
+ try:
+ https_request = urllib2.HTTPHandler.do_request_
+ except AttributeError:
+ # for python 2.3
+ pass
+
+ def _make_response(self, result, url):
+ data = "\r\n".join(["%s: %s" % (k, v) for k, v in result.header_items])
+ headers = httplib.HTTPMessage(StringIO(data))
+ response = urllib.addinfourl(StringIO(result.data), headers, url)
+ code, msg = result.status.split(None, 1)
+ response.code, response.msg = int(code), msg
+ return response
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/contrib/template.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,131 @@
+"""
+Interface to various templating engines.
+"""
+import os.path
+
+__all__ = [
+ "render_cheetah", "render_genshi", "render_mako",
+ "cache",
+]
+
+class render_cheetah:
+ """Rendering interface to Cheetah Templates.
+
+ Example:
+
+ render = render_cheetah('templates')
+ render.hello(name="cheetah")
+ """
+ def __init__(self, path):
+ # give an error if Cheetah is not installed
+ from Cheetah.Template import Template
+ self.path = path
+
+ def __getattr__(self, name):
+ from Cheetah.Template import Template
+ path = os.path.join(self.path, name + ".html")
+
+ def template(**kw):
+ t = Template(file=path, searchList=[kw])
+ return t.respond()
+
+ return template
+
+class render_genshi:
+ """Rendering interface to Genshi templates.
+
+ Example, for xml/html templates:
+
+ render = render_genshi(['templates/'])
+ render.hello(name='genshi')
+
+ For text templates:
+
+ render = render_genshi(['templates/'], type='text')
+ render.hello(name='genshi')
+ """
+
+ def __init__(self, *a, **kwargs):
+ from genshi.template import TemplateLoader
+
+ self._type = kwargs.pop('type', None)
+ self._loader = TemplateLoader(*a, **kwargs)
+
+ def __getattr__(self, name):
+ # Assuming all templates are html
+ path = name + ".html"
+
+ if self._type == "text":
+ from genshi.template import TextTemplate
+ cls = TextTemplate
+ type = "text"
+ else:
+ cls = None
+ type = None
+
+ t = self._loader.load(path, cls=cls)
+ def template(**kw):
+ stream = t.generate(**kw)
+ if type:
+ return stream.render(type)
+ else:
+ return stream.render()
+ return template
+
+class render_jinja:
+ """Rendering interface to Jinja2 Templates
+
+ Example:
+
+ render= render_jinja('templates')
+ render.hello(name='jinja2')
+ """
+ def __init__(self, *a, **kwargs):
+ extensions = kwargs.pop('extensions', [])
+ globals = kwargs.pop('globals', {})
+
+ from jinja2 import Environment,FileSystemLoader
+ self._lookup = Environment(loader=FileSystemLoader(*a, **kwargs), extensions=extensions)
+ self._lookup.globals.update(globals)
+
+ def __getattr__(self, name):
+ # Assuming all templates end with .html
+ path = name + '.html'
+ t = self._lookup.get_template(path)
+ return t.render
+
+class render_mako:
+ """Rendering interface to Mako Templates.
+
+ Example:
+
+ render = render_mako(directories=['templates'])
+ render.hello(name="mako")
+ """
+ def __init__(self, *a, **kwargs):
+ from mako.lookup import TemplateLookup
+ self._lookup = TemplateLookup(*a, **kwargs)
+
+ def __getattr__(self, name):
+ # Assuming all templates are html
+ path = name + ".html"
+ t = self._lookup.get_template(path)
+ return t.render
+
+class cache:
+ """Cache for any rendering interface.
+
+ Example:
+
+ render = cache(render_cheetah("templates/"))
+ render.hello(name='cache')
+ """
+ def __init__(self, render):
+ self._render = render
+ self._cache = {}
+
+ def __getattr__(self, name):
+ if name not in self._cache:
+ self._cache[name] = getattr(self._render, name)
+ return self._cache[name]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/db.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,1137 @@
+"""
+Database API
+(part of web.py)
+"""
+
+__all__ = [
+ "UnknownParamstyle", "UnknownDB", "TransactionError",
+ "sqllist", "sqlors", "reparam", "sqlquote",
+ "SQLQuery", "SQLParam", "sqlparam",
+ "SQLLiteral", "sqlliteral",
+ "database", 'DB',
+]
+
+import time
+try:
+ import datetime
+except ImportError:
+ datetime = None
+
+from utils import threadeddict, storage, iters, iterbetter
+
+try:
+ # db module can work independent of web.py
+ from webapi import debug, config
+except:
+ import sys
+ debug = sys.stderr
+ config = storage()
+
+class UnknownDB(Exception):
+ """raised for unsupported dbms"""
+ pass
+
+class _ItplError(ValueError):
+ def __init__(self, text, pos):
+ ValueError.__init__(self)
+ self.text = text
+ self.pos = pos
+ def __str__(self):
+ return "unfinished expression in %s at char %d" % (
+ repr(self.text), self.pos)
+
+class TransactionError(Exception): pass
+
+class UnknownParamstyle(Exception):
+ """
+ raised for unsupported db paramstyles
+
+ (currently supported: qmark, numeric, format, pyformat)
+ """
+ pass
+
+class SQLParam:
+ """
+ Parameter in SQLQuery.
+
+ >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
+ >>> q
+ <sql: "SELECT * FROM test WHERE name='joe'">
+ >>> q.query()
+ 'SELECT * FROM test WHERE name=%s'
+ >>> q.values()
+ ['joe']
+ """
+ def __init__(self, value):
+ self.value = value
+
+ def get_marker(self, paramstyle='pyformat'):
+ if paramstyle == 'qmark':
+ return '?'
+ elif paramstyle == 'numeric':
+ return ':1'
+ elif paramstyle is None or paramstyle in ['format', 'pyformat']:
+ return '%s'
+ raise UnknownParamstyle, paramstyle
+
+ def sqlquery(self):
+ return SQLQuery([self])
+
+ def __add__(self, other):
+ return self.sqlquery() + other
+
+ def __radd__(self, other):
+ return other + self.sqlquery()
+
+ def __str__(self):
+ return str(self.value)
+
+ def __repr__(self):
+ return '<param: %s>' % repr(self.value)
+
+sqlparam = SQLParam
+
+class SQLQuery:
+ """
+ You can pass this sort of thing as a clause in any db function.
+ Otherwise, you can pass a dictionary to the keyword argument `vars`
+ and the function will call reparam for you.
+
+ Internally, consists of `items`, which is a list of strings and
+ SQLParams, which get concatenated to produce the actual query.
+ """
+ # tested in sqlquote's docstring
+ def __init__(self, items=[]):
+ """Creates a new SQLQuery.
+
+ >>> SQLQuery("x")
+ <sql: 'x'>
+ >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
+ >>> q
+ <sql: 'SELECT * FROM test WHERE x=1'>
+ >>> q.query(), q.values()
+ ('SELECT * FROM test WHERE x=%s', [1])
+ >>> SQLQuery(SQLParam(1))
+ <sql: '1'>
+ """
+ if isinstance(items, list):
+ self.items = items
+ elif isinstance(items, SQLParam):
+ self.items = [items]
+ elif isinstance(items, SQLQuery):
+ self.items = list(items.items)
+ else:
+ self.items = [str(items)]
+
+ # Take care of SQLLiterals
+ for i, item in enumerate(self.items):
+ if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
+ self.items[i] = item.value.v
+
+ def __add__(self, other):
+ if isinstance(other, basestring):
+ items = [other]
+ elif isinstance(other, SQLQuery):
+ items = other.items
+ else:
+ return NotImplemented
+ return SQLQuery(self.items + items)
+
+ def __radd__(self, other):
+ if isinstance(other, basestring):
+ items = [other]
+ else:
+ return NotImplemented
+
+ return SQLQuery(items + self.items)
+
+ def __iadd__(self, other):
+ if isinstance(other, basestring):
+ items = [other]
+ elif isinstance(other, SQLQuery):
+ items = other.items
+ else:
+ return NotImplemented
+ self.items.extend(items)
+ return self
+
+ def __len__(self):
+ return len(self.query())
+
+ def query(self, paramstyle=None):
+ """
+ Returns the query part of the sql query.
+ >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
+ >>> q.query()
+ 'SELECT * FROM test WHERE name=%s'
+ >>> q.query(paramstyle='qmark')
+ 'SELECT * FROM test WHERE name=?'
+ """
+ s = ''
+ for x in self.items:
+ if isinstance(x, SQLParam):
+ x = x.get_marker(paramstyle)
+ s += x
+ return s
+
+ def values(self):
+ """
+ Returns the values of the parameters used in the sql query.
+ >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
+ >>> q.values()
+ ['joe']
+ """
+ return [i.value for i in self.items if isinstance(i, SQLParam)]
+
+ def join(items, sep=' '):
+ """
+ Joins multiple queries.
+
+ >>> SQLQuery.join(['a', 'b'], ', ')
+ <sql: 'a, b'>
+ """
+ if len(items) == 0:
+ return SQLQuery("")
+
+ q = SQLQuery(items[0])
+ for item in items[1:]:
+ q += sep
+ q += item
+ return q
+
+ join = staticmethod(join)
+
+ def __str__(self):
+ try:
+ return self.query() % tuple([sqlify(x) for x in self.values()])
+ except (ValueError, TypeError):
+ return self.query()
+
+ def __repr__(self):
+ return '<sql: %s>' % repr(str(self))
+
+class SQLLiteral:
+ """
+ Protects a string from `sqlquote`.
+
+ >>> sqlquote('NOW()')
+ <sql: "'NOW()'">
+ >>> sqlquote(SQLLiteral('NOW()'))
+ <sql: 'NOW()'>
+ """
+ def __init__(self, v):
+ self.v = v
+
+ def __repr__(self):
+ return self.v
+
+sqlliteral = SQLLiteral
+
+def _sqllist(values):
+ """
+ >>> _sqllist([1, 2, 3])
+ <sql: '(1, 2, 3)'>
+ """
+ items = []
+ items.append('(')
+ for i, v in enumerate(values):
+ if i != 0:
+ items.append(', ')
+ items.append(sqlparam(v))
+ items.append(')')
+ return SQLQuery(items)
+
+def reparam(string_, dictionary):
+ """
+ Takes a string and a dictionary and interpolates the string
+ using values from the dictionary. Returns an `SQLQuery` for the result.
+
+ >>> reparam("s = $s", dict(s=True))
+ <sql: "s = 't'">
+ >>> reparam("s IN $s", dict(s=[1, 2]))
+ <sql: 's IN (1, 2)'>
+ """
+ dictionary = dictionary.copy() # eval mucks with it
+ vals = []
+ result = []
+ for live, chunk in _interpolate(string_):
+ if live:
+ v = eval(chunk, dictionary)
+ result.append(sqlquote(v))
+ else:
+ result.append(chunk)
+ return SQLQuery.join(result, '')
+
+def sqlify(obj):
+ """
+ converts `obj` to its proper SQL version
+
+ >>> sqlify(None)
+ 'NULL'
+ >>> sqlify(True)
+ "'t'"
+ >>> sqlify(3)
+ '3'
+ """
+ # because `1 == True and hash(1) == hash(True)`
+ # we have to do this the hard way...
+
+ if obj is None:
+ return 'NULL'
+ elif obj is True:
+ return "'t'"
+ elif obj is False:
+ return "'f'"
+ elif datetime and isinstance(obj, datetime.datetime):
+ return repr(obj.isoformat())
+ else:
+ return repr(obj)
+
+def sqllist(lst):
+ """
+ Converts the arguments for use in something like a WHERE clause.
+
+ >>> sqllist(['a', 'b'])
+ 'a, b'
+ >>> sqllist('a')
+ 'a'
+ >>> sqllist(u'abc')
+ u'abc'
+ """
+ if isinstance(lst, basestring):
+ return lst
+ else:
+ return ', '.join(lst)
+
+def sqlors(left, lst):
+ """
+ `left` is a SQL clause like `tablename.arg = `
+ and `lst` is a list of values. Returns an `SQLQuery` that ORs
+ together the clause applied to each item in `lst`.
+
+ >>> sqlors('foo = ', [])
+ <sql: '1=2'>
+ >>> sqlors('foo = ', [1])
+ <sql: 'foo = 1'>
+ >>> sqlors('foo = ', 1)
+ <sql: 'foo = 1'>
+ >>> sqlors('foo = ', [1,2,3])
+ <sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
+ """
+ if isinstance(lst, iters):
+ lst = list(lst)
+ ln = len(lst)
+ if ln == 0:
+ return SQLQuery("1=2")
+ if ln == 1:
+ lst = lst[0]
+
+ if isinstance(lst, iters):
+ return SQLQuery(['('] +
+ sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
+ ['1=2)']
+ )
+ else:
+ return left + sqlparam(lst)
+
+def sqlwhere(dictionary, grouping=' AND '):
+ """
+ Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
+
+ >>> sqlwhere({'cust_id': 2, 'order_id':3})
+ <sql: 'order_id = 3 AND cust_id = 2'>
+ >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
+ <sql: 'order_id = 3, cust_id = 2'>
+ >>> sqlwhere({'a': 'a', 'b': 'b'}).query()
+ 'a = %s AND b = %s'
+ """
+ return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
+
+def sqlquote(a):
+ """
+ Ensures `a` is quoted properly for use in a SQL query.
+
+ >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
+ <sql: "WHERE x = 't' AND y = 3">
+ >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
+ <sql: "WHERE x = 't' AND y IN (2, 3)">
+ """
+ if isinstance(a, list):
+ return _sqllist(a)
+ else:
+ return sqlparam(a).sqlquery()
+
+class Transaction:
+ """Database transaction."""
+ def __init__(self, ctx):
+ self.ctx = ctx
+ self.transaction_count = transaction_count = len(ctx.transactions)
+
+ class transaction_engine:
+ """Transaction Engine used in top level transactions."""
+ def do_transact(self):
+ ctx.commit(unload=False)
+
+ def do_commit(self):
+ ctx.commit()
+
+ def do_rollback(self):
+ ctx.rollback()
+
+ class subtransaction_engine:
+ """Transaction Engine used in sub transactions."""
+ def query(self, q):
+ db_cursor = ctx.db.cursor()
+ ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
+
+ def do_transact(self):
+ self.query('SAVEPOINT webpy_sp_%s')
+
+ def do_commit(self):
+ self.query('RELEASE SAVEPOINT webpy_sp_%s')
+
+ def do_rollback(self):
+ self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
+
+ class dummy_engine:
+ """Transaction Engine used instead of subtransaction_engine
+ when sub transactions are not supported."""
+ do_transact = do_commit = do_rollback = lambda self: None
+
+ if self.transaction_count:
+ # nested transactions are not supported in some databases
+ if self.ctx.get('ignore_nested_transactions'):
+ self.engine = dummy_engine()
+ else:
+ self.engine = subtransaction_engine()
+ else:
+ self.engine = transaction_engine()
+
+ self.engine.do_transact()
+ self.ctx.transactions.append(self)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exctype, excvalue, traceback):
+ if exctype is not None:
+ self.rollback()
+ else:
+ self.commit()
+
+ def commit(self):
+ if len(self.ctx.transactions) > self.transaction_count:
+ self.engine.do_commit()
+ self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
+
+ def rollback(self):
+ if len(self.ctx.transactions) > self.transaction_count:
+ self.engine.do_rollback()
+ self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
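+
+# Usage sketch (assuming `db` is a DB instance; see DB.transaction below):
+#
+#     with db.transaction():
+#         db.insert('person', name='joe')
+#
+#     # or, without the with-statement:
+#     t = db.transaction()
+#     try:
+#         db.insert('person', name='bob')
+#     except:
+#         t.rollback()
+#         raise
+#     else:
+#         t.commit()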
+
+class DB:
+ """Database"""
+ def __init__(self, db_module, keywords):
+ """Creates a database.
+ """
+ # some DB implementations take an optional parameter `driver` to use a specific driver module
+ # but it should not be passed to connect
+ keywords.pop('driver', None)
+
+ self.db_module = db_module
+ self.keywords = keywords
+
+
+ self._ctx = threadeddict()
+ # flag to enable/disable printing queries
+ self.printing = config.get('debug', False)
+ self.supports_multiple_insert = False
+
+ try:
+ import DBUtils
+ # enable pooling if DBUtils module is available.
+ self.has_pooling = True
+ except ImportError:
+ self.has_pooling = False
+
+ # Pooling can be disabled by passing pooling=False in the keywords.
+ self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
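+ # e.g. DB(db_module, dict(db='mydb', pooling=False)) keeps pooling off
+ # even when DBUtils is installed.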
+
+ def _getctx(self):
+ if not self._ctx.get('db'):
+ self._load_context(self._ctx)
+ return self._ctx
+ ctx = property(_getctx)
+
+ def _load_context(self, ctx):
+ ctx.dbq_count = 0
+ ctx.transactions = [] # stack of transactions
+
+ if self.has_pooling:
+ ctx.db = self._connect_with_pooling(self.keywords)
+ else:
+ ctx.db = self._connect(self.keywords)
+ ctx.db_execute = self._db_execute
+
+ if not hasattr(ctx.db, 'commit'):
+ ctx.db.commit = lambda: None
+
+ if not hasattr(ctx.db, 'rollback'):
+ ctx.db.rollback = lambda: None
+
+ def commit(unload=True):
+ # do db commit and release the connection if pooling is enabled.
+ ctx.db.commit()
+ if unload and self.has_pooling:
+ self._unload_context(self._ctx)
+
+ def rollback():
+ # do db rollback and release the connection if pooling is enabled.
+ ctx.db.rollback()
+ if self.has_pooling:
+ self._unload_context(self._ctx)
+
+ ctx.commit = commit
+ ctx.rollback = rollback
+
+ def _unload_context(self, ctx):
+ del ctx.db
+
+ def _connect(self, keywords):
+ return self.db_module.connect(**keywords)
+
+ def _connect_with_pooling(self, keywords):
+ def get_pooled_db():
+ from DBUtils import PooledDB
+
+ # In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
+ # see Bug#122112
+
+ if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
+ return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
+ else:
+ return PooledDB.PooledDB(creator=self.db_module, **keywords)
+
+ if getattr(self, '_pooleddb', None) is None:
+ self._pooleddb = get_pooled_db()
+
+ return self._pooleddb.connection()
+
+ def _db_cursor(self):
+ return self.ctx.db.cursor()
+
+ def _param_marker(self):
+ """Returns the parameter marker based on the paramstyle attribute of this database."""
+ style = getattr(self, 'paramstyle', 'pyformat')
+
+ if style == 'qmark':
+ return '?'
+ elif style == 'numeric':
+ return ':1'
+ elif style in ['format', 'pyformat']:
+ return '%s'
+ raise UnknownParamstyle, style
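+
+ # e.g. 'qmark' renders "name = ?", 'numeric' renders "name = :1", and
+ # 'format'/'pyformat' render "name = %s".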
+
+ def _db_execute(self, cur, sql_query):
+ """executes an sql query"""
+ self.ctx.dbq_count += 1
+
+ try:
+ a = time.time()
+ paramstyle = getattr(self, 'paramstyle', 'pyformat')
+ out = cur.execute(sql_query.query(paramstyle), sql_query.values())
+ b = time.time()
+ except:
+ if self.printing:
+ print >> debug, 'ERR:', str(sql_query)
+ if self.ctx.transactions:
+ self.ctx.transactions[-1].rollback()
+ else:
+ self.ctx.rollback()
+ raise
+
+ if self.printing:
+ print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))
+ return out
+
+ def _where(self, where, vars):
+ if isinstance(where, (int, long)):
+ where = "id = " + sqlparam(where)
+ #@@@ for backward-compatibility
+ elif isinstance(where, (list, tuple)) and len(where) == 2:
+ where = SQLQuery(where[0], where[1])
+ elif isinstance(where, SQLQuery):
+ pass
+ else:
+ where = reparam(where, vars)
+ return where
+
+ def query(self, sql_query, vars=None, processed=False, _test=False):
+ """
+ Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
+ If `processed=True`, `vars` is a `reparam`-style list to use
+ instead of interpolating.
+
+ >>> db = DB(None, {})
+ >>> db.query("SELECT * FROM foo", _test=True)
+ <sql: 'SELECT * FROM foo'>
+ >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
+ <sql: "SELECT * FROM foo WHERE x = 'f'">
+ >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
+ <sql: "SELECT * FROM foo WHERE x = 'f'">
+ """
+ if vars is None: vars = {}
+
+ if not processed and not isinstance(sql_query, SQLQuery):
+ sql_query = reparam(sql_query, vars)
+
+ if _test: return sql_query
+
+ db_cursor = self._db_cursor()
+ self._db_execute(db_cursor, sql_query)
+
+ if db_cursor.description:
+ names = [x[0] for x in db_cursor.description]
+ def iterwrapper():
+ row = db_cursor.fetchone()
+ while row:
+ yield storage(dict(zip(names, row)))
+ row = db_cursor.fetchone()
+ out = iterbetter(iterwrapper())
+ out.__len__ = lambda: int(db_cursor.rowcount)
+ out.list = lambda: [storage(dict(zip(names, x))) \
+ for x in db_cursor.fetchall()]
+ else:
+ out = db_cursor.rowcount
+
+ if not self.ctx.transactions:
+ self.ctx.commit()
+ return out
+
+ def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
+ limit=None, offset=None, _test=False):
+ """
+ Selects `what` from `tables` with clauses `where`, `order`,
+ `group`, `limit`, and `offset`. Uses vars to interpolate.
+ Otherwise, each clause can be a SQLQuery.
+
+ >>> db = DB(None, {})
+ >>> db.select('foo', _test=True)
+ <sql: 'SELECT * FROM foo'>
+ >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
+ <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
+ """
+ if vars is None: vars = {}
+ sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
+ clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
+ qout = SQLQuery.join(clauses)
+ if _test: return qout
+ return self.query(qout, processed=True)
+
+ def where(self, table, what='*', order=None, group=None, limit=None,
+ offset=None, _test=False, **kwargs):
+ """
+ Selects from `table` where keys are equal to values in `kwargs`.
+
+ >>> db = DB(None, {})
+ >>> db.where('foo', bar_id=3, _test=True)
+ <sql: 'SELECT * FROM foo WHERE bar_id = 3'>
+ >>> db.where('foo', source=2, crust='dewey', _test=True)
+ <sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'">
+ """
+ where = []
+ for k, v in kwargs.iteritems():
+ where.append(k + ' = ' + sqlquote(v))
+ return self.select(table, what=what, order=order,
+ group=group, limit=limit, offset=offset, _test=_test,
+ where=SQLQuery.join(where, ' AND '))
+
+ def sql_clauses(self, what, tables, where, group, order, limit, offset):
+ return (
+ ('SELECT', what),
+ ('FROM', sqllist(tables)),
+ ('WHERE', where),
+ ('GROUP BY', group),
+ ('ORDER BY', order),
+ ('LIMIT', limit),
+ ('OFFSET', offset))
+
+ def gen_clause(self, sql, val, vars):
+ if isinstance(val, (int, long)):
+ if sql == 'WHERE':
+ nout = 'id = ' + sqlquote(val)
+ else:
+ nout = SQLQuery(val)
+ #@@@
+ elif isinstance(val, (list, tuple)) and len(val) == 2:
+ nout = SQLQuery(val[0], val[1]) # backwards-compatibility
+ elif isinstance(val, SQLQuery):
+ nout = val
+ else:
+ nout = reparam(val, vars)
+
+ def xjoin(a, b):
+ if a and b: return a + ' ' + b
+ else: return a or b
+
+ return xjoin(sql, nout)
+
+ def insert(self, tablename, seqname=None, _test=False, **values):
+ """
+ Inserts `values` into `tablename`. Returns the current sequence ID.
+ Set `seqname` to the name of the id sequence if it's not the default,
+ or to `False` if there isn't one.
+
+ >>> db = DB(None, {})
+ >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
+ >>> q
+ <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
+ >>> q.query()
+ 'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
+ >>> q.values()
+ [2, 'bob']
+ """
+ def q(x): return "(" + x + ")"
+
+ if values:
+ _keys = SQLQuery.join(values.keys(), ', ')
+ _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
+ sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
+ else:
+ sql_query = SQLQuery("INSERT INTO %s DEFAULT VALUES" % tablename)
+
+ if _test: return sql_query
+
+ db_cursor = self._db_cursor()
+ if seqname is not False:
+ sql_query = self._process_insert_query(sql_query, tablename, seqname)
+
+ if isinstance(sql_query, tuple):
+ # for some databases, a separate query has to be made to find
+ # the id of the inserted row.
+ q1, q2 = sql_query
+ self._db_execute(db_cursor, q1)
+ self._db_execute(db_cursor, q2)
+ else:
+ self._db_execute(db_cursor, sql_query)
+
+ try:
+ out = db_cursor.fetchone()[0]
+ except Exception:
+ out = None
+
+ if not self.ctx.transactions:
+ self.ctx.commit()
+ return out
+
+ def multiple_insert(self, tablename, values, seqname=None, _test=False):
+ """
+ Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
+ one for each row to be inserted, each with the same set of keys.
+ Returns the list of ids of the inserted rows.
+ Set `seqname` to the name of the id sequence if it's not the default,
+ or to `False` if there isn't one.
+
+ >>> db = DB(None, {})
+ >>> db.supports_multiple_insert = True
+ >>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
+ >>> db.multiple_insert('person', values=values, _test=True)
+ <sql: "INSERT INTO person (name, email) VALUES ('foo', 'foo@example.com'), ('bar', 'bar@example.com')">
+ """
+ if not values:
+ return []
+
+ if not self.supports_multiple_insert:
+ out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
+ if seqname is False:
+ return None
+ else:
+ return out
+
+ keys = values[0].keys()
+ #@@ make sure all keys are valid
+
+ # make sure all rows have same keys.
+ for v in values:
+ if v.keys() != keys:
+ raise ValueError, 'Bad data'
+
+ sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
+
+ data = []
+ for row in values:
+ d = SQLQuery.join([SQLParam(row[k]) for k in keys], ', ')
+ data.append('(' + d + ')')
+ sql_query += SQLQuery.join(data, ', ')
+
+ if _test: return sql_query
+
+ db_cursor = self._db_cursor()
+ if seqname is not False:
+ sql_query = self._process_insert_query(sql_query, tablename, seqname)
+
+ if isinstance(sql_query, tuple):
+ # for some databases, a separate query has to be made to find
+ # the id of the inserted row.
+ q1, q2 = sql_query
+ self._db_execute(db_cursor, q1)
+ self._db_execute(db_cursor, q2)
+ else:
+ self._db_execute(db_cursor, sql_query)
+
+ try:
+ out = db_cursor.fetchone()[0]
+ out = range(out-len(values)+1, out+1)
+ except Exception:
+ out = None
+
+ if not self.ctx.transactions:
+ self.ctx.commit()
+ return out
+
+
+ def update(self, tables, where, vars=None, _test=False, **values):
+ """
+ Update `tables` with clause `where` (interpolated using `vars`)
+ and setting `values`.
+
+ >>> db = DB(None, {})
+ >>> name = 'Joseph'
+ >>> q = db.update('foo', where='name = $name', name='bob', age=2,
+ ... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
+ >>> q
+ <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
+ >>> q.query()
+ 'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
+ >>> q.values()
+ [2, 'bob', 'Joseph']
+ """
+ if vars is None: vars = {}
+ where = self._where(where, vars)
+
+ query = (
+ "UPDATE " + sqllist(tables) +
+ " SET " + sqlwhere(values, ', ') +
+ " WHERE " + where)
+
+ if _test: return query
+
+ db_cursor = self._db_cursor()
+ self._db_execute(db_cursor, query)
+ if not self.ctx.transactions:
+ self.ctx.commit()
+ return db_cursor.rowcount
+
+ def delete(self, table, where, using=None, vars=None, _test=False):
+ """
+ Deletes from `table` with clauses `where` and `using`.
+
+ >>> db = DB(None, {})
+ >>> name = 'Joe'
+ >>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
+ <sql: "DELETE FROM foo WHERE name = 'Joe'">
+ """
+ if vars is None: vars = {}
+ where = self._where(where, vars)
+
+ q = 'DELETE FROM ' + table
+ if where: q += ' WHERE ' + where
+ if using: q += ' USING ' + sqllist(using)
+
+ if _test: return q
+
+ db_cursor = self._db_cursor()
+ self._db_execute(db_cursor, q)
+ if not self.ctx.transactions:
+ self.ctx.commit()
+ return db_cursor.rowcount
+
+ def _process_insert_query(self, query, tablename, seqname):
+ return query
+
+ def transaction(self):
+ """Start a transaction."""
+ return Transaction(self.ctx)
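+
+    # A minimal usage sketch (assumes `db` is a DB instance; Transaction is
+    # defined earlier in this module and supports commit()/rollback()):
+    #
+    #     t = db.transaction()
+    #     try:
+    #         db.insert('person', name='foo')
+    #     except:
+    #         t.rollback()
+    #         raise
+    #     else:
+    #         t.commit()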
+
+class PostgresDB(DB):
+ """Postgres driver."""
+ def __init__(self, **keywords):
+ if 'pw' in keywords:
+ keywords['password'] = keywords['pw']
+ del keywords['pw']
+
+ db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
+ if db_module.__name__ == "psycopg2":
+ import psycopg2.extensions
+ psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+
+ keywords['database'] = keywords.pop('db')
+ self.dbname = "postgres"
+ self.paramstyle = db_module.paramstyle
+ DB.__init__(self, db_module, keywords)
+ self.supports_multiple_insert = True
+
+ def _process_insert_query(self, query, tablename, seqname):
+ if seqname is None:
+ seqname = tablename + "_id_seq"
+ return query + "; SELECT currval('%s')" % seqname
+
+ def _connect(self, keywords):
+ conn = DB._connect(self, keywords)
+ conn.set_client_encoding('UTF8')
+ return conn
+
+ def _connect_with_pooling(self, keywords):
+ conn = DB._connect_with_pooling(self, keywords)
+ conn._con._con.set_client_encoding('UTF8')
+ return conn
+
+class MySQLDB(DB):
+ def __init__(self, **keywords):
+ import MySQLdb as db
+ if 'pw' in keywords:
+ keywords['passwd'] = keywords['pw']
+ del keywords['pw']
+
+ if 'charset' not in keywords:
+ keywords['charset'] = 'utf8'
+ elif keywords['charset'] is None:
+ del keywords['charset']
+
+ self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
+ self.dbname = "mysql"
+ DB.__init__(self, db, keywords)
+ self.supports_multiple_insert = True
+
+ def _process_insert_query(self, query, tablename, seqname):
+ return query, SQLQuery('SELECT last_insert_id();')
+
+def import_driver(drivers, preferred=None):
+ """Import the first available driver or preferred driver.
+ """
+ if preferred:
+ drivers = [preferred]
+
+ for d in drivers:
+ try:
+ return __import__(d, None, None, ['x'])
+ except ImportError:
+ pass
+ raise ImportError("Unable to import " + " or ".join(drivers))
+
+class SqliteDB(DB):
+ def __init__(self, **keywords):
+ db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
+
+ if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
+ db.paramstyle = 'qmark'
+
+ self.paramstyle = db.paramstyle
+ keywords['database'] = keywords.pop('db')
+ self.dbname = "sqlite"
+ DB.__init__(self, db, keywords)
+
+ def _process_insert_query(self, query, tablename, seqname):
+ return query, SQLQuery('SELECT last_insert_rowid();')
+
+ def query(self, *a, **kw):
+ out = DB.query(self, *a, **kw)
+ if isinstance(out, iterbetter):
+ # rowcount is not provided by sqlite
+ del out.__len__
+ return out
+
+class FirebirdDB(DB):
+ """Firebird Database.
+ """
+ def __init__(self, **keywords):
+ try:
+ import kinterbasdb as db
+ except Exception:
+ db = None
+ pass
+ if 'pw' in keywords:
+ keywords['passwd'] = keywords['pw']
+ del keywords['pw']
+ keywords['database'] = keywords['db']
+ del keywords['db']
+ DB.__init__(self, db, keywords)
+
+ def delete(self, table, where=None, using=None, vars=None, _test=False):
+ # firebird doesn't support using clause
+ using=None
+ return DB.delete(self, table, where, using, vars, _test)
+
+ def sql_clauses(self, what, tables, where, group, order, limit, offset):
+ return (
+ ('SELECT', ''),
+ ('FIRST', limit),
+ ('SKIP', offset),
+ ('', what),
+ ('FROM', sqllist(tables)),
+ ('WHERE', where),
+ ('GROUP BY', group),
+ ('ORDER BY', order)
+ )
+
+class MSSQLDB(DB):
+ def __init__(self, **keywords):
+ import pymssql as db
+ if 'pw' in keywords:
+ keywords['password'] = keywords.pop('pw')
+ keywords['database'] = keywords.pop('db')
+ self.dbname = "mssql"
+ DB.__init__(self, db, keywords)
+
+ def sql_clauses(self, what, tables, where, group, order, limit, offset):
+ return (
+ ('SELECT', what),
+ ('TOP', limit),
+ ('FROM', sqllist(tables)),
+ ('WHERE', where),
+ ('GROUP BY', group),
+ ('ORDER BY', order),
+ ('OFFSET', offset))
+
+ def _test(self):
+ """Test LIMIT.
+
+ Fake presence of pymssql module for running tests.
+ >>> import sys
+ >>> sys.modules['pymssql'] = sys.modules['sys']
+
+ MSSQL has TOP clause instead of LIMIT clause.
+ >>> db = MSSQLDB(db='test', user='joe', pw='secret')
+ >>> db.select('foo', limit=4, _test=True)
+ <sql: 'SELECT * TOP 4 FROM foo'>
+ """
+ pass
+
+class OracleDB(DB):
+ def __init__(self, **keywords):
+ import cx_Oracle as db
+ if 'pw' in keywords:
+ keywords['password'] = keywords.pop('pw')
+
+ #@@ TODO: use db.makedsn if host, port is specified
+ keywords['dsn'] = keywords.pop('db')
+ self.dbname = 'oracle'
+ db.paramstyle = 'numeric'
+ self.paramstyle = db.paramstyle
+
+ # oracle doesn't support pooling
+ keywords.pop('pooling', None)
+ DB.__init__(self, db, keywords)
+
+ def _process_insert_query(self, query, tablename, seqname):
+ if seqname is None:
+ # It is not possible to get seq name from table name in Oracle
+ return query
+ else:
+ return query + "; SELECT %s.currval FROM dual" % seqname
+
+_databases = {}
+def database(dburl=None, **params):
+ """Creates appropriate database using params.
+
+ Pooling will be enabled if DBUtils module is available.
+ Pooling can be disabled by passing pooling=False in params.
+ """
+ dbn = params.pop('dbn')
+ if dbn in _databases:
+ return _databases[dbn](**params)
+ else:
+ raise UnknownDB, dbn
+
+def register_database(name, clazz):
+ """
+ Register a database.
+
+ >>> class LegacyDB(DB):
+ ... def __init__(self, **params):
+ ... pass
+ ...
+ >>> register_database('legacy', LegacyDB)
+ >>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
+ """
+ _databases[name] = clazz
+
+register_database('mysql', MySQLDB)
+register_database('postgres', PostgresDB)
+register_database('sqlite', SqliteDB)
+register_database('firebird', FirebirdDB)
+register_database('mssql', MSSQLDB)
+register_database('oracle', OracleDB)
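+
+# A small usage sketch for database(); the connection parameters and table
+# below are only illustrative:
+#
+#     db = database(dbn='sqlite', db='test.db')
+#     db.query("CREATE TABLE IF NOT EXISTS todo (id INTEGER PRIMARY KEY, title TEXT)")
+#     db.insert('todo', title='Learn web.py')
+#     rows = list(db.select('todo', limit=10))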
+
+def _interpolate(format):
+ """
+ Takes a format string and returns a list of 2-tuples of the form
+ (boolean, string) where boolean says whether string should be evaled
+ or not.
+
+ from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
+ """
+ from tokenize import tokenprog
+
+ def matchorfail(text, pos):
+ match = tokenprog.match(text, pos)
+ if match is None:
+ raise _ItplError(text, pos)
+ return match, match.end()
+
+ namechars = "abcdefghijklmnopqrstuvwxyz" \
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
+ chunks = []
+ pos = 0
+
+ while 1:
+ dollar = format.find("$", pos)
+ if dollar < 0:
+ break
+ nextchar = format[dollar + 1]
+
+ if nextchar == "{":
+ chunks.append((0, format[pos:dollar]))
+ pos, level = dollar + 2, 1
+ while level:
+ match, pos = matchorfail(format, pos)
+ tstart, tend = match.regs[3]
+ token = format[tstart:tend]
+ if token == "{":
+ level = level + 1
+ elif token == "}":
+ level = level - 1
+ chunks.append((1, format[dollar + 2:pos - 1]))
+
+ elif nextchar in namechars:
+ chunks.append((0, format[pos:dollar]))
+ match, pos = matchorfail(format, dollar + 1)
+ while pos < len(format):
+ if format[pos] == "." and \
+ pos + 1 < len(format) and format[pos + 1] in namechars:
+ match, pos = matchorfail(format, pos + 1)
+ elif format[pos] in "([":
+ pos, level = pos + 1, 1
+ while level:
+ match, pos = matchorfail(format, pos)
+ tstart, tend = match.regs[3]
+ token = format[tstart:tend]
+ if token[0] in "([":
+ level = level + 1
+ elif token[0] in ")]":
+ level = level - 1
+ else:
+ break
+ chunks.append((1, format[dollar + 1:pos]))
+ else:
+ chunks.append((0, format[pos:dollar + 1]))
+ pos = dollar + 1 + (nextchar == "$")
+
+ if pos < len(format):
+ chunks.append((0, format[pos:]))
+ return chunks
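+
+# For example, _interpolate("Hello $name!") returns
+# [(0, 'Hello '), (1, 'name'), (0, '!')]; the chunks flagged with 1 are the
+# ones meant to be evaluated against the caller's variables.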
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/debugerror.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,356 @@
+"""
+pretty debug errors
+(part of web.py)
+
+portions adapted from Django <djangoproject.com>
+Copyright (c) 2005, the Lawrence Journal-World
+Used under the modified BSD license:
+http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
+"""
+
+__all__ = ["debugerror", "djangoerror", "emailerrors"]
+
+import sys, urlparse, pprint, traceback
+from net import websafe
+from template import Template
+from utils import sendmail
+import webapi as web
+
+import os, os.path
+whereami = os.path.join(os.getcwd(), __file__)
+whereami = os.path.sep.join(whereami.split(os.path.sep)[:-1])
+djangoerror_t = """\
+$def with (exception_type, exception_value, frames)
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html lang="en">
+<head>
+ <meta http-equiv="content-type" content="text/html; charset=utf-8" />
+ <meta name="robots" content="NONE,NOARCHIVE" />
+ <title>$exception_type at $ctx.path</title>
+ <style type="text/css">
+ html * { padding:0; margin:0; }
+ body * { padding:10px 20px; }
+ body * * { padding:0; }
+ body { font:small sans-serif; }
+ body>div { border-bottom:1px solid #ddd; }
+ h1 { font-weight:normal; }
+ h2 { margin-bottom:.8em; }
+ h2 span { font-size:80%; color:#666; font-weight:normal; }
+ h3 { margin:1em 0 .5em 0; }
+ h4 { margin:0 0 .5em 0; font-weight: normal; }
+ table {
+ border:1px solid #ccc; border-collapse: collapse; background:white; }
+ tbody td, tbody th { vertical-align:top; padding:2px 3px; }
+ thead th {
+ padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
+ font-weight:normal; font-size:11px; border:1px solid #ddd; }
+ tbody th { text-align:right; color:#666; padding-right:.5em; }
+ table.vars { margin:5px 0 2px 40px; }
+ table.vars td, table.req td { font-family:monospace; }
+ table td.code { width:100%;}
+ table td.code div { overflow:hidden; }
+ table.source th { color:#666; }
+ table.source td {
+ font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
+ ul.traceback { list-style-type:none; }
+ ul.traceback li.frame { margin-bottom:1em; }
+ div.context { margin: 10px 0; }
+ div.context ol {
+ padding-left:30px; margin:0 10px; list-style-position: inside; }
+ div.context ol li {
+ font-family:monospace; white-space:pre; color:#666; cursor:pointer; }
+ div.context ol.context-line li { color:black; background-color:#ccc; }
+ div.context ol.context-line li span { float: right; }
+ div.commands { margin-left: 40px; }
+ div.commands a { color:black; text-decoration:none; }
+ #summary { background: #ffc; }
+ #summary h2 { font-weight: normal; color: #666; }
+ #explanation { background:#eee; }
+ #template, #template-not-exist { background:#f6f6f6; }
+ #template-not-exist ul { margin: 0 0 0 20px; }
+ #traceback { background:#eee; }
+ #requestinfo { background:#f6f6f6; padding-left:120px; }
+ #summary table { border:none; background:transparent; }
+ #requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
+ #requestinfo h3 { margin-bottom:-1em; }
+ .error { background: #ffc; }
+ .specific { color:#cc3300; font-weight:bold; }
+ </style>
+ <script type="text/javascript">
+ //<!--
+ function getElementsByClassName(oElm, strTagName, strClassName){
+ // Written by Jonathan Snook, http://www.snook.ca/jon;
+ // Add-ons by Robert Nyman, http://www.robertnyman.com
+ var arrElements = (strTagName == "*" && document.all)? document.all :
+ oElm.getElementsByTagName(strTagName);
+ var arrReturnElements = new Array();
+ strClassName = strClassName.replace(/\-/g, "\\-");
+ var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$$)");
+ var oElement;
+ for(var i=0; i<arrElements.length; i++){
+ oElement = arrElements[i];
+ if(oRegExp.test(oElement.className)){
+ arrReturnElements.push(oElement);
+ }
+ }
+ return (arrReturnElements)
+ }
+ function hideAll(elems) {
+ for (var e = 0; e < elems.length; e++) {
+ elems[e].style.display = 'none';
+ }
+ }
+ window.onload = function() {
+ hideAll(getElementsByClassName(document, 'table', 'vars'));
+ hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
+ hideAll(getElementsByClassName(document, 'ol', 'post-context'));
+ }
+ function toggle() {
+ for (var i = 0; i < arguments.length; i++) {
+ var e = document.getElementById(arguments[i]);
+ if (e) {
+ e.style.display = e.style.display == 'none' ? 'block' : 'none';
+ }
+ }
+ return false;
+ }
+ function varToggle(link, id) {
+ toggle('v' + id);
+ var s = link.getElementsByTagName('span')[0];
+ var uarr = String.fromCharCode(0x25b6);
+ var darr = String.fromCharCode(0x25bc);
+ s.innerHTML = s.innerHTML == uarr ? darr : uarr;
+ return false;
+ }
+ //-->
+ </script>
+</head>
+<body>
+
+$def dicttable (d, kls='req', id=None):
+ $ items = d and d.items() or []
+ $items.sort()
+ $:dicttable_items(items, kls, id)
+
+$def dicttable_items(items, kls='req', id=None):
+ $if items:
+ <table class="$kls"
+ $if id: id="$id"
+ ><thead><tr><th>Variable</th><th>Value</th></tr></thead>
+ <tbody>
+ $for k, v in items:
+ <tr><td>$k</td><td class="code"><div>$prettify(v)</div></td></tr>
+ </tbody>
+ </table>
+ $else:
+ <p>No data.</p>
+
+<div id="summary">
+ <h1>$exception_type at $ctx.path</h1>
+ <h2>$exception_value</h2>
+ <table><tr>
+ <th>Python</th>
+ <td>$frames[0].filename in $frames[0].function, line $frames[0].lineno</td>
+ </tr><tr>
+ <th>Web</th>
+ <td>$ctx.method $ctx.home$ctx.path</td>
+ </tr></table>
+</div>
+<div id="traceback">
+<h2>Traceback <span>(innermost first)</span></h2>
+<ul class="traceback">
+$for frame in frames:
+ <li class="frame">
+ <code>$frame.filename</code> in <code>$frame.function</code>
+ $if frame.context_line:
+ <div class="context" id="c$frame.id">
+ $if frame.pre_context:
+ <ol start="$frame.pre_context_lineno" class="pre-context" id="pre$frame.id">
+ $for line in frame.pre_context:
+ <li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>
+ </ol>
+ <ol start="$frame.lineno" class="context-line"><li onclick="toggle('pre$frame.id', 'post$frame.id')">$frame.context_line <span>...</span></li></ol>
+ $if frame.post_context:
+ <ol start='${frame.lineno + 1}' class="post-context" id="post$frame.id">
+ $for line in frame.post_context:
+ <li onclick="toggle('pre$frame.id', 'post$frame.id')">$line</li>
+ </ol>
+ </div>
+
+ $if frame.vars:
+ <div class="commands">
+ <a href='#' onclick="return varToggle(this, '$frame.id')"><span>▶</span> Local vars</a>
+ $# $inspect.formatargvalues(*inspect.getargvalues(frame['tb'].tb_frame))
+ </div>
+ $:dicttable(frame.vars, kls='vars', id=('v' + str(frame.id)))
+ </li>
+ </ul>
+</div>
+
+<div id="requestinfo">
+$if ctx.output or ctx.headers:
+ <h2>Response so far</h2>
+ <h3>HEADERS</h3>
+ $:dicttable_items(ctx.headers)
+
+ <h3>BODY</h3>
+ <p class="req" style="padding-bottom: 2em"><code>
+ $ctx.output
+ </code></p>
+
+<h2>Request information</h2>
+
+<h3>INPUT</h3>
+$:dicttable(web.input())
+
+<h3 id="cookie-info">COOKIES</h3>
+$:dicttable(web.cookies())
+
+<h3 id="meta-info">META</h3>
+$ newctx = [(k, v) for (k, v) in ctx.iteritems() if not k.startswith('_') and not isinstance(v, dict)]
+$:dicttable(dict(newctx))
+
+<h3 id="meta-info">ENVIRONMENT</h3>
+$:dicttable(ctx.env)
+</div>
+
+<div id="explanation">
+ <p>
+ You're seeing this error because you have <code>web.config.debug</code>
+    set to <code>True</code>. Set that to <code>False</code> if you don't want to see this.
+ </p>
+</div>
+
+</body>
+</html>
+"""
+
+djangoerror_r = None
+
+def djangoerror():
+ def _get_lines_from_file(filename, lineno, context_lines):
+ """
+ Returns context_lines before and after lineno from file.
+ Returns (pre_context_lineno, pre_context, context_line, post_context).
+ """
+ try:
+ source = open(filename).readlines()
+ lower_bound = max(0, lineno - context_lines)
+ upper_bound = lineno + context_lines
+
+ pre_context = \
+ [line.strip('\n') for line in source[lower_bound:lineno]]
+ context_line = source[lineno].strip('\n')
+ post_context = \
+ [line.strip('\n') for line in source[lineno + 1:upper_bound]]
+
+ return lower_bound, pre_context, context_line, post_context
+ except (OSError, IOError):
+ return None, [], None, []
+
+ exception_type, exception_value, tback = sys.exc_info()
+ frames = []
+ while tback is not None:
+ filename = tback.tb_frame.f_code.co_filename
+ function = tback.tb_frame.f_code.co_name
+ lineno = tback.tb_lineno - 1
+ pre_context_lineno, pre_context, context_line, post_context = \
+ _get_lines_from_file(filename, lineno, 7)
+ frames.append(web.storage({
+ 'tback': tback,
+ 'filename': filename,
+ 'function': function,
+ 'lineno': lineno,
+ 'vars': tback.tb_frame.f_locals,
+ 'id': id(tback),
+ 'pre_context': pre_context,
+ 'context_line': context_line,
+ 'post_context': post_context,
+ 'pre_context_lineno': pre_context_lineno,
+ }))
+ tback = tback.tb_next
+ frames.reverse()
+ urljoin = urlparse.urljoin
+ def prettify(x):
+ try:
+ out = pprint.pformat(x)
+ except Exception, e:
+ out = '[could not display: <' + e.__class__.__name__ + \
+ ': '+str(e)+'>]'
+ return out
+
+ global djangoerror_r
+ if djangoerror_r is None:
+ djangoerror_r = Template(djangoerror_t, filename=__file__, filter=websafe)
+
+ t = djangoerror_r
+ globals = {'ctx': web.ctx, 'web':web, 'dict':dict, 'str':str, 'prettify': prettify}
+ t.t.func_globals.update(globals)
+ return t(exception_type, exception_value, frames)
+
+def debugerror():
+ """
+ A replacement for `internalerror` that presents a nice page with lots
+ of debug information for the programmer.
+
+ (Based on the beautiful 500 page from [Django](http://djangoproject.com/),
+ designed by [Wilson Miner](http://wilsonminer.com/).)
+ """
+ return web._InternalError(djangoerror())
+
+def emailerrors(to_address, olderror, from_address=None):
+ """
+ Wraps the old `internalerror` handler (pass as `olderror`) to
+ additionally email all errors to `to_address`, to aid in
+ debugging production websites.
+
+ Emails contain a normal text traceback as well as an
+ attachment containing the nice `debugerror` page.
+ """
+ from_address = from_address or to_address
+
+ def emailerrors_internal():
+ error = olderror()
+ tb = sys.exc_info()
+ error_name = tb[0]
+ error_value = tb[1]
+ tb_txt = ''.join(traceback.format_exception(*tb))
+ path = web.ctx.path
+ request = web.ctx.method + ' ' + web.ctx.home + web.ctx.fullpath
+ text = ("""\
+------here----
+Content-Type: text/plain
+Content-Disposition: inline
+
+%(request)s
+
+%(tb_txt)s
+
+------here----
+Content-Type: text/html; name="bug.html"
+Content-Disposition: attachment; filename="bug.html"
+
+""" % locals()) + str(djangoerror())
+ sendmail(
+ "your buggy site <%s>" % from_address,
+ "the bugfixer <%s>" % to_address,
+ "bug: %(error_name)s: %(error_value)s (%(path)s)" % locals(),
+ text,
+ headers={'Content-Type': 'multipart/mixed; boundary="----here----"'})
+ return error
+
+ return emailerrors_internal
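+
+# Typical wiring (a sketch; `app` is a web.py application object created
+# elsewhere, and the addresses are placeholders):
+#
+#     app.internalerror = emailerrors('bugs@example.com', debugerror)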
+
+if __name__ == "__main__":
+ urls = (
+ '/', 'index'
+ )
+ from application import application
+ app = application(urls, globals())
+ app.internalerror = debugerror
+
+ class index:
+ def GET(self):
+ thisdoesnotexist
+
+ app.run()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/form.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,264 @@
+"""
+HTML forms
+(part of web.py)
+"""
+
+import copy, re
+import webapi as web
+import utils, net
+
+def attrget(obj, attr, value=None):
+ if hasattr(obj, 'has_key') and obj.has_key(attr): return obj[attr]
+ if hasattr(obj, attr): return getattr(obj, attr)
+ return value
+
+class Form:
+ r"""
+ HTML form.
+
+ >>> f = Form(Textbox("x"))
+ >>> f.render()
+ '<table>\n <tr><th><label for="x">x</label></th><td><input type="text" name="x" id="x" /></td></tr>\n</table>'
+ """
+ def __init__(self, *inputs, **kw):
+ self.inputs = inputs
+ self.valid = True
+ self.note = None
+ self.validators = kw.pop('validators', [])
+
+ def __call__(self, x=None):
+ o = copy.deepcopy(self)
+ if x: o.validates(x)
+ return o
+
+ def render(self):
+ out = ''
+ out += self.rendernote(self.note)
+ out += '<table>\n'
+ for i in self.inputs:
+ out += ' <tr><th><label for="%s">%s</label></th>' % (i.id, net.websafe(i.description))
+ out += "<td>"+i.pre+i.render()+i.post+"</td></tr>\n"
+ out += "</table>"
+ return out
+
+ def render_css(self):
+ out = []
+ out.append(self.rendernote(self.note))
+ for i in self.inputs:
+ out.append('<label for="%s">%s</label>' % (i.id, net.websafe(i.description)))
+ out.append(i.pre)
+ out.append(i.render())
+ out.append(i.post)
+ out.append('\n')
+ return ''.join(out)
+
+ def rendernote(self, note):
+ if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
+ else: return ""
+
+ def validates(self, source=None, _validate=True, **kw):
+ source = source or kw or web.input()
+ out = True
+ for i in self.inputs:
+ v = attrget(source, i.name)
+ if _validate:
+ out = i.validate(v) and out
+ else:
+ i.value = v
+ if _validate:
+ out = out and self._validate(source)
+ self.valid = out
+ return out
+
+ def _validate(self, value):
+ self.value = value
+ for v in self.validators:
+ if not v.valid(value):
+ self.note = v.msg
+ return False
+ return True
+
+ def fill(self, source=None, **kw):
+ return self.validates(source, _validate=False, **kw)
+
+ def __getitem__(self, i):
+ for x in self.inputs:
+ if x.name == i: return x
+ raise KeyError, i
+
+ def __getattr__(self, name):
+ # don't interfere with deepcopy
+ inputs = self.__dict__.get('inputs') or []
+ for x in inputs:
+ if x.name == name: return x
+ raise AttributeError, name
+
+ def get(self, i, default=None):
+ try:
+ return self[i]
+ except KeyError:
+ return default
+
+ def _get_d(self): #@@ should really be form.attr, no?
+ return utils.storage([(i.name, i.value) for i in self.inputs])
+ d = property(_get_d)
+
+class Input(object):
+ def __init__(self, name, *validators, **attrs):
+ self.description = attrs.pop('description', name)
+ self.value = attrs.pop('value', None)
+ self.pre = attrs.pop('pre', "")
+ self.post = attrs.pop('post', "")
+ self.id = attrs.setdefault('id', name)
+ if 'class_' in attrs:
+ attrs['class'] = attrs['class_']
+ del attrs['class_']
+ self.name, self.validators, self.attrs, self.note = name, validators, attrs, None
+
+ def validate(self, value):
+ self.value = value
+ for v in self.validators:
+ if not v.valid(value):
+ self.note = v.msg
+ return False
+ return True
+
+ def render(self): raise NotImplementedError
+
+ def rendernote(self, note):
+ if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
+ else: return ""
+
+ def addatts(self):
+ str = ""
+ for (n, v) in self.attrs.items():
+ str += ' %s="%s"' % (n, net.websafe(v))
+ return str
+
+#@@ quoting
+
+class Textbox(Input):
+ def render(self, shownote=True):
+ x = '<input type="text" name="%s"' % net.websafe(self.name)
+ if self.value: x += ' value="%s"' % net.websafe(self.value)
+ x += self.addatts()
+ x += ' />'
+ if shownote:
+ x += self.rendernote(self.note)
+ return x
+
+class Password(Input):
+ def render(self):
+ x = '<input type="password" name="%s"' % net.websafe(self.name)
+ if self.value: x += ' value="%s"' % net.websafe(self.value)
+ x += self.addatts()
+ x += ' />'
+ x += self.rendernote(self.note)
+ return x
+
+class Textarea(Input):
+ def render(self):
+ x = '<textarea name="%s"' % net.websafe(self.name)
+ x += self.addatts()
+ x += '>'
+ if self.value is not None: x += net.websafe(self.value)
+ x += '</textarea>'
+ x += self.rendernote(self.note)
+ return x
+
+class Dropdown(Input):
+ def __init__(self, name, args, *validators, **attrs):
+ self.args = args
+ super(Dropdown, self).__init__(name, *validators, **attrs)
+
+ def render(self):
+ x = '<select name="%s"%s>\n' % (net.websafe(self.name), self.addatts())
+ for arg in self.args:
+ if isinstance(arg, (tuple, list)):
+ value, desc= arg
+ else:
+ value, desc = arg, arg
+
+ if self.value == value: select_p = ' selected="selected"'
+ else: select_p = ''
+ x += ' <option %s value="%s">%s</option>\n' % (select_p, net.websafe(value), net.websafe(desc))
+ x += '</select>\n'
+ x += self.rendernote(self.note)
+ return x
+
+class Radio(Input):
+ def __init__(self, name, args, *validators, **attrs):
+ self.args = args
+ super(Radio, self).__init__(name, *validators, **attrs)
+
+ def render(self):
+ x = '<span>'
+ for arg in self.args:
+ if self.value == arg: select_p = ' checked="checked"'
+ else: select_p = ''
+ x += '<input type="radio" name="%s" value="%s"%s%s /> %s ' % (net.websafe(self.name), net.websafe(arg), select_p, self.addatts(), net.websafe(arg))
+ x += '</span>'
+ x += self.rendernote(self.note)
+ return x
+
+class Checkbox(Input):
+ def render(self):
+ x = '<input name="%s" type="checkbox"' % net.websafe(self.name)
+ if self.value: x += ' checked="checked"'
+ x += self.addatts()
+ x += ' />'
+ x += self.rendernote(self.note)
+ return x
+
+class Button(Input):
+ def __init__(self, name, *validators, **attrs):
+ super(Button, self).__init__(name, *validators, **attrs)
+ self.description = ""
+
+ def render(self):
+ safename = net.websafe(self.name)
+ x = '<button name="%s"%s>%s</button>' % (safename, self.addatts(), safename)
+ x += self.rendernote(self.note)
+ return x
+
+class Hidden(Input):
+ def __init__(self, name, *validators, **attrs):
+ super(Hidden, self).__init__(name, *validators, **attrs)
+        # it doesn't make sense for a hidden field to have a description
+ self.description = ""
+
+ def render(self):
+ x = '<input type="hidden" name="%s"' % net.websafe(self.name)
+ if self.value: x += ' value="%s"' % net.websafe(self.value)
+ x += self.addatts()
+ x += ' />'
+ return x
+
+class File(Input):
+ def render(self):
+ x = '<input type="file" name="%s"' % net.websafe(self.name)
+ x += self.addatts()
+ x += ' />'
+ x += self.rendernote(self.note)
+ return x
+
+class Validator:
+ def __deepcopy__(self, memo): return copy.copy(self)
+ def __init__(self, msg, test, jstest=None): utils.autoassign(self, locals())
+ def valid(self, value):
+ try: return self.test(value)
+ except: return False
+
+notnull = Validator("Required", bool)
+
+class regexp(Validator):
+ def __init__(self, rexp, msg):
+ self.rexp = re.compile(rexp)
+ self.msg = msg
+
+ def valid(self, value):
+ return bool(self.rexp.match(value))
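+
+# A small illustrative form combining inputs and validators (the field
+# names and the pattern are arbitrary examples):
+#
+#     login = Form(
+#         Textbox('username', notnull, regexp('^[a-z0-9_]+$', 'Bad username')),
+#         Password('password', notnull, description='Password'))
+#
+#     f = login()
+#     if f.validates({'username': 'joe', 'password': 'secret'}):
+#         pass  # f.d.username and f.d.password hold the submitted values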
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/http.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,163 @@
+"""
+HTTP Utilities
+(from web.py)
+"""
+
+__all__ = [
+ "expires", "lastmodified",
+ "prefixurl", "modified",
+ "write",
+ "changequery", "url",
+ "profiler",
+]
+
+import sys, os, threading, urllib, urlparse
+try: import datetime
+except ImportError: pass
+import net, utils, webapi as web
+
+def prefixurl(base=''):
+ """
+    Returns a relative URL prefix for getting back to the root of the
+    application from the current request path: `base` with one '../'
+    appended for each additional '/' in `web.ctx.path` (or './' if the
+    result would otherwise be empty).
+ """
+ url = web.ctx.path.lstrip('/')
+ for i in xrange(url.count('/')):
+ base += '../'
+ if not base:
+ base = './'
+ return base
+
+def expires(delta):
+ """
+ Outputs an `Expires` header for `delta` from now.
+ `delta` is a `timedelta` object or a number of seconds.
+ """
+ if isinstance(delta, (int, long)):
+ delta = datetime.timedelta(seconds=delta)
+ date_obj = datetime.datetime.utcnow() + delta
+ web.header('Expires', net.httpdate(date_obj))
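+
+# For example, expires(3600) marks the response as cacheable for an hour;
+# expires(datetime.timedelta(days=1)) works the same way.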
+
+def lastmodified(date_obj):
+ """Outputs a `Last-Modified` header for `datetime`."""
+ web.header('Last-Modified', net.httpdate(date_obj))
+
+def modified(date=None, etag=None):
+ """
+ Checks to see if the page has been modified since the version in the
+ requester's cache.
+
+ When you publish pages, you can include `Last-Modified` and `ETag`
+ with the date the page was last modified and an opaque token for
+ the particular version, respectively. When readers reload the page,
+ the browser sends along the modification date and etag value for
+ the version it has in its cache. If the page hasn't changed,
+ the server can just return `304 Not Modified` and not have to
+ send the whole page again.
+
+ This function takes the last-modified date `date` and the ETag `etag`
+ and checks the headers to see if they match. If they do, it returns
+ `True` and sets the response status to `304 Not Modified`. It also
+ sets `Last-Modified and `ETag` output headers.
+ """
+ try:
+ from __builtin__ import set
+ except ImportError:
+ # for python 2.3
+ from sets import Set as set
+
+ n = set([x.strip('" ') for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(',')])
+ m = net.parsehttpdate(web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '').split(';')[0])
+ validate = False
+ if etag:
+ if '*' in n or etag in n:
+ validate = True
+ if date and m:
+ # we subtract a second because
+ # HTTP dates don't have sub-second precision
+ if date-datetime.timedelta(seconds=1) <= m:
+ validate = True
+
+ if validate: web.ctx.status = '304 Not Modified'
+ if date: lastmodified(date)
+ if etag: web.header('ETag', '"' + etag + '"')
+ return not validate
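+
+# A minimal usage sketch inside a GET handler (`load_page` and its
+# attributes are hypothetical):
+#
+#     def GET(self):
+#         page = load_page()
+#         if not modified(date=page.updated_at, etag=page.revision):
+#             return ''   # 304 Not Modified has already been set
+#         return page.body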
+
+def write(cgi_response):
+ """
+ Converts a standard CGI-style string response into `header` and
+ `output` calls.
+ """
+ cgi_response = str(cgi_response)
+    cgi_response = cgi_response.replace('\r\n', '\n')
+ head, body = cgi_response.split('\n\n', 1)
+ lines = head.split('\n')
+
+ for line in lines:
+ if line.isspace():
+ continue
+ hdr, value = line.split(":", 1)
+ value = value.strip()
+ if hdr.lower() == "status":
+ web.ctx.status = value
+ else:
+ web.header(hdr, value)
+
+ web.output(body)
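+
+# For example, write("Status: 200 OK\nContent-Type: text/plain\n\nhello")
+# sets the status and the Content-Type header, then outputs "hello".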
+
+def urlencode(query):
+ """
+ Same as urllib.urlencode, but supports unicode strings.
+
+ >>> urlencode({'text':'foo bar'})
+ 'text=foo+bar'
+ """
+ query = dict([(k, utils.utf8(v)) for k, v in query.items()])
+ return urllib.urlencode(query)
+
+def changequery(query=None, **kw):
+ """
+ Imagine you're at `/foo?a=1&b=2`. Then `changequery(a=3)` will return
+ `/foo?a=3&b=2` -- the same URL but with the arguments you requested
+ changed.
+ """
+ if query is None:
+ query = web.input(_method='get')
+ for k, v in kw.iteritems():
+ if v is None:
+ query.pop(k, None)
+ else:
+ query[k] = v
+ out = web.ctx.path
+ if query:
+ out += '?' + urlencode(query)
+ return out
+
+def url(path=None, **kw):
+ """
+    Makes a url by concatenating web.ctx.homepath and path and appending
+    the query string created from the keyword arguments.
+ """
+ if path is None:
+ path = web.ctx.path
+ if path.startswith("/"):
+ out = web.ctx.homepath + path
+ else:
+ out = path
+
+ if kw:
+ out += '?' + urlencode(kw)
+
+ return out
+
+def profiler(app):
+ """Outputs basic profiling information at the bottom of each response."""
+ from utils import profile
+ def profile_internal(e, o):
+ out, result = profile(app)(e, o)
+ return list(out) + ['<pre>' + net.websafe(result) + '</pre>']
+ return profile_internal
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/httpserver.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,225 @@
+__all__ = ["runsimple"]
+
+import sys, os
+import webapi as web
+import net
+import utils
+
+def runbasic(func, server_address=("0.0.0.0", 8080)):
+ """
+ Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
+ is hosted statically.
+
+ Based on [WsgiServer][ws] from [Colin Stewart][cs].
+
+ [ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
+ [cs]: http://www.owlfish.com/
+ """
+ # Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
+ # Modified somewhat for simplicity
+ # Used under the modified BSD license:
+ # http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
+
+ import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
+ import socket, errno
+ import traceback
+
+ class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+ def run_wsgi_app(self):
+ protocol, host, path, parameters, query, fragment = \
+ urlparse.urlparse('http://dummyhost%s' % self.path)
+
+ # we only use path, query
+ env = {'wsgi.version': (1, 0)
+ ,'wsgi.url_scheme': 'http'
+ ,'wsgi.input': self.rfile
+ ,'wsgi.errors': sys.stderr
+ ,'wsgi.multithread': 1
+ ,'wsgi.multiprocess': 0
+ ,'wsgi.run_once': 0
+ ,'REQUEST_METHOD': self.command
+ ,'REQUEST_URI': self.path
+ ,'PATH_INFO': path
+ ,'QUERY_STRING': query
+ ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
+ ,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
+ ,'REMOTE_ADDR': self.client_address[0]
+ ,'SERVER_NAME': self.server.server_address[0]
+ ,'SERVER_PORT': str(self.server.server_address[1])
+ ,'SERVER_PROTOCOL': self.request_version
+ }
+
+ for http_header, http_value in self.headers.items():
+ env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
+ http_value
+
+ # Setup the state
+ self.wsgi_sent_headers = 0
+ self.wsgi_headers = []
+
+ try:
+                # We have the environment, now invoke the application
+ result = self.server.app(env, self.wsgi_start_response)
+ try:
+ try:
+ for data in result:
+ if data:
+ self.wsgi_write_data(data)
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ except socket.error, socket_err:
+ # Catch common network errors and suppress them
+ if (socket_err.args[0] in \
+ (errno.ECONNABORTED, errno.EPIPE)):
+ return
+ except socket.timeout, socket_timeout:
+ return
+ except:
+ print >> web.debug, traceback.format_exc(),
+
+ if (not self.wsgi_sent_headers):
+ # We must write out something!
+ self.wsgi_write_data(" ")
+ return
+
+ do_POST = run_wsgi_app
+ do_PUT = run_wsgi_app
+ do_DELETE = run_wsgi_app
+
+ def do_GET(self):
+ if self.path.startswith('/static/'):
+ SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
+ else:
+ self.run_wsgi_app()
+
+ def wsgi_start_response(self, response_status, response_headers,
+ exc_info=None):
+ if (self.wsgi_sent_headers):
+ raise Exception \
+ ("Headers already sent and start_response called again!")
+ # Should really take a copy to avoid changes in the application....
+ self.wsgi_headers = (response_status, response_headers)
+ return self.wsgi_write_data
+
+ def wsgi_write_data(self, data):
+ if (not self.wsgi_sent_headers):
+ status, headers = self.wsgi_headers
+ # Need to send header prior to data
+ status_code = status[:status.find(' ')]
+ status_msg = status[status.find(' ') + 1:]
+ self.send_response(int(status_code), status_msg)
+ for header, value in headers:
+ self.send_header(header, value)
+ self.end_headers()
+ self.wsgi_sent_headers = 1
+ # Send the data
+ self.wfile.write(data)
+
+ class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ def __init__(self, func, server_address):
+ BaseHTTPServer.HTTPServer.__init__(self,
+ server_address,
+ WSGIHandler)
+ self.app = func
+ self.serverShuttingDown = 0
+
+ print "http://%s:%d/" % server_address
+ WSGIServer(func, server_address).serve_forever()
+
+def runsimple(func, server_address=("0.0.0.0", 8080)):
+ """
+ Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
+ The directory `static/` is hosted statically.
+
+ [cp]: http://www.cherrypy.org
+ """
+ from wsgiserver import CherryPyWSGIServer
+ from SimpleHTTPServer import SimpleHTTPRequestHandler
+ from BaseHTTPServer import BaseHTTPRequestHandler
+
+ class StaticApp(SimpleHTTPRequestHandler):
+ """WSGI application for serving static files."""
+ def __init__(self, environ, start_response):
+ self.headers = []
+ self.environ = environ
+ self.start_response = start_response
+
+ def send_response(self, status, msg=""):
+ self.status = str(status) + " " + msg
+
+ def send_header(self, name, value):
+ self.headers.append((name, value))
+
+ def end_headers(self):
+ pass
+
+ def log_message(*a): pass
+
+ def __iter__(self):
+ environ = self.environ
+
+ self.path = environ.get('PATH_INFO', '')
+ self.client_address = environ.get('REMOTE_ADDR','-'), \
+ environ.get('REMOTE_PORT','-')
+ self.command = environ.get('REQUEST_METHOD', '-')
+
+ from cStringIO import StringIO
+ self.wfile = StringIO() # for capturing error
+
+ f = self.send_head()
+ self.start_response(self.status, self.headers)
+
+ if f:
+ block_size = 16 * 1024
+ while True:
+ buf = f.read(block_size)
+ if not buf:
+ break
+ yield buf
+ f.close()
+ else:
+ value = self.wfile.getvalue()
+ yield value
+
+ class WSGIWrapper(BaseHTTPRequestHandler):
+ """WSGI wrapper for logging the status and serving static files."""
+ def __init__(self, app):
+ self.app = app
+ self.format = '%s - - [%s] "%s %s %s" - %s'
+
+ def __call__(self, environ, start_response):
+ def xstart_response(status, response_headers, *args):
+ write = start_response(status, response_headers, *args)
+ self.log(status, environ)
+ return write
+
+ path = environ.get('PATH_INFO', '')
+ if path.startswith('/static/'):
+ return StaticApp(environ, xstart_response)
+ else:
+ return self.app(environ, xstart_response)
+
+ def log(self, status, environ):
+ outfile = environ.get('wsgi.errors', web.debug)
+ req = environ.get('PATH_INFO', '_')
+ protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
+ method = environ.get('REQUEST_METHOD', '-')
+ host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
+ environ.get('REMOTE_PORT','-'))
+
+ #@@ It is really bad to extend from
+ #@@ BaseHTTPRequestHandler just for this method
+ time = self.log_date_time_string()
+
+ msg = self.format % (host, time, protocol, method, req, status)
+ print >> outfile, utils.safestr(msg)
+
+ func = WSGIWrapper(func)
+ server = CherryPyWSGIServer(server_address, func, server_name="localhost")
+
+ print "http://%s:%d/" % server_address
+ try:
+ server.start()
+ except KeyboardInterrupt:
+ server.stop()
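+
+# Typical usage (a sketch; `application` and `wsgifunc` come from the
+# application module elsewhere in this package):
+#
+#     from application import application
+#     app = application(urls, globals())
+#     runsimple(app.wsgifunc(), ("0.0.0.0", 8080))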
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/net.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,190 @@
+"""
+Network Utilities
+(from web.py)
+"""
+
+__all__ = [
+ "validipaddr", "validipport", "validip", "validaddr",
+ "urlquote",
+ "httpdate", "parsehttpdate",
+ "htmlquote", "htmlunquote", "websafe",
+]
+
+import urllib, time
+try: import datetime
+except ImportError: pass
+
+def validipaddr(address):
+ """
+ Returns True if `address` is a valid IPv4 address.
+
+ >>> validipaddr('192.168.1.1')
+ True
+ >>> validipaddr('192.168.1.800')
+ False
+ >>> validipaddr('192.168.1')
+ False
+ """
+ try:
+ octets = address.split('.')
+ if len(octets) != 4:
+ return False
+ for x in octets:
+ if not (0 <= int(x) <= 255):
+ return False
+ except ValueError:
+ return False
+ return True
+
+def validipport(port):
+ """
+ Returns True if `port` is a valid IPv4 port.
+
+ >>> validipport('9000')
+ True
+ >>> validipport('foo')
+ False
+ >>> validipport('1000000')
+ False
+ """
+ try:
+ if not (0 <= int(port) <= 65535):
+ return False
+ except ValueError:
+ return False
+ return True
+
+def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
+    """Returns `(ip_address, port)` parsed from the string `ip`, which may
+    be an address, a port, or `addr:port`."""
+ addr = defaultaddr
+ port = defaultport
+
+ ip = ip.split(":", 1)
+ if len(ip) == 1:
+ if not ip[0]:
+ pass
+ elif validipaddr(ip[0]):
+ addr = ip[0]
+ elif validipport(ip[0]):
+ port = int(ip[0])
+ else:
+ raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
+ elif len(ip) == 2:
+ addr, port = ip
+        if not (validipaddr(addr) and validipport(port)):
+ raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
+ port = int(port)
+ else:
+ raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
+ return (addr, port)
+
+def validaddr(string_):
+ """
+ Returns either (ip_address, port) or "/path/to/socket" from string_
+
+ >>> validaddr('/path/to/socket')
+ '/path/to/socket'
+ >>> validaddr('8000')
+ ('0.0.0.0', 8000)
+ >>> validaddr('127.0.0.1')
+ ('127.0.0.1', 8080)
+ >>> validaddr('127.0.0.1:8000')
+ ('127.0.0.1', 8000)
+ >>> validaddr('fff')
+ Traceback (most recent call last):
+ ...
+ ValueError: fff is not a valid IP address/port
+ """
+ if '/' in string_:
+ return string_
+ else:
+ return validip(string_)
+
+def urlquote(val):
+ """
+ Quotes a string for use in a URL.
+
+ >>> urlquote('://?f=1&j=1')
+ '%3A//%3Ff%3D1%26j%3D1'
+ >>> urlquote(None)
+ ''
+ >>> urlquote(u'\u203d')
+ '%E2%80%BD'
+ """
+ if val is None: return ''
+ if not isinstance(val, unicode): val = str(val)
+ else: val = val.encode('utf-8')
+ return urllib.quote(val)
+
+def httpdate(date_obj):
+ """
+ Formats a datetime object for use in HTTP headers.
+
+ >>> import datetime
+ >>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
+ 'Thu, 01 Jan 1970 01:01:01 GMT'
+ """
+ return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
+
+def parsehttpdate(string_):
+ """
+ Parses an HTTP date into a datetime object.
+
+ >>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
+ datetime.datetime(1970, 1, 1, 1, 1, 1)
+ """
+ try:
+ t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
+ except ValueError:
+ return None
+ return datetime.datetime(*t[:6])
+
+def htmlquote(text):
+ """
+ Encodes `text` for raw use in HTML.
+
+ >>> htmlquote("<'&\\">")
+    '&lt;&#39;&amp;&quot;&gt;'
+    """
+    text = text.replace("&", "&amp;") # Must be done first!
+    text = text.replace("<", "&lt;")
+    text = text.replace(">", "&gt;")
+    text = text.replace("'", "&#39;")
+    text = text.replace('"', "&quot;")
+ return text
+
+def htmlunquote(text):
+ """
+ Decodes `text` that's HTML quoted.
+
+    >>> htmlunquote('&lt;&#39;&amp;&quot;&gt;')
+    '<\\'&">'
+    """
+    text = text.replace("&quot;", '"')
+    text = text.replace("&#39;", "'")
+    text = text.replace("&gt;", ">")
+    text = text.replace("&lt;", "<")
+    text = text.replace("&amp;", "&") # Must be done last!
+ return text
+
+def websafe(val):
+ """
+ Converts `val` so that it's safe for use in UTF-8 HTML.
+
+ >>> websafe("<'&\\">")
+    '&lt;&#39;&amp;&quot;&gt;'
+ >>> websafe(None)
+ ''
+ >>> websafe(u'\u203d')
+ '\\xe2\\x80\\xbd'
+ """
+ if val is None:
+ return ''
+ if isinstance(val, unicode):
+ val = val.encode('utf-8')
+ val = str(val)
+ return htmlquote(val)
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/session.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,319 @@
+"""
+Session Management
+(from web.py)
+"""
+
+import os, time, datetime, random, base64
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+try:
+ import hashlib
+ sha1 = hashlib.sha1
+except ImportError:
+ import sha
+ sha1 = sha.new
+
+import utils
+import webapi as web
+
+__all__ = [
+ 'Session', 'SessionExpired',
+ 'Store', 'DiskStore', 'DBStore',
+]
+
+web.config.session_parameters = utils.storage({
+ 'cookie_name': 'webpy_session_id',
+ 'cookie_domain': None,
+ 'timeout': 86400, #24 * 60 * 60, # 24 hours in seconds
+ 'ignore_expiry': True,
+ 'ignore_change_ip': True,
+ 'secret_key': 'fLjUfxqXtfNoIldA0A0J',
+ 'expired_message': 'Session expired',
+})
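+
+# These defaults can be overridden before a Session is constructed, e.g.
+# (the values below are only illustrative):
+#
+#     web.config.session_parameters['timeout'] = 3600   # one hour
+#     web.config.session_parameters['secret_key'] = 'replace this in production'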
+
+class SessionExpired(web.HTTPError):
+ def __init__(self, message):
+ web.HTTPError.__init__(self, '200 OK', {}, data=message)
+
+class Session(utils.ThreadedDict):
+ """Session management for web.py
+ """
+
+ def __init__(self, app, store, initializer=None):
+ self.__dict__['store'] = store
+ self.__dict__['_initializer'] = initializer
+ self.__dict__['_last_cleanup_time'] = 0
+ self.__dict__['_config'] = utils.storage(web.config.session_parameters)
+
+ if app:
+ app.add_processor(self._processor)
+
+ def _processor(self, handler):
+ """Application processor to setup session for every request"""
+ self._cleanup()
+ self._load()
+
+ try:
+ return handler()
+ finally:
+ self._save()
+
+ def _load(self):
+ """Load the session from the store, by the id from cookie"""
+ cookie_name = self._config.cookie_name
+ cookie_domain = self._config.cookie_domain
+ self.session_id = web.cookies().get(cookie_name)
+
+ # protection against session_id tampering
+ if self.session_id and not self._valid_session_id(self.session_id):
+ self.session_id = None
+
+ self._check_expiry()
+ if self.session_id:
+ d = self.store[self.session_id]
+ self.update(d)
+ self._validate_ip()
+
+ if not self.session_id:
+ self.session_id = self._generate_session_id()
+
+ if self._initializer:
+ if isinstance(self._initializer, dict):
+ self.update(self._initializer)
+ elif hasattr(self._initializer, '__call__'):
+ self._initializer()
+
+ self.ip = web.ctx.ip
+
+ def _check_expiry(self):
+ # check for expiry
+ if self.session_id and self.session_id not in self.store:
+ if self._config.ignore_expiry:
+ self.session_id = None
+ else:
+ return self.expired()
+
+ def _validate_ip(self):
+ # check for change of IP
+ if self.session_id and self.get('ip', None) != web.ctx.ip:
+ if not self._config.ignore_change_ip:
+ return self.expired()
+
+ def _save(self):
+ cookie_name = self._config.cookie_name
+ cookie_domain = self._config.cookie_domain
+ if not self.get('_killed'):
+ web.setcookie(cookie_name, self.session_id, domain=cookie_domain)
+ self.store[self.session_id] = dict(self)
+ else:
+ web.setcookie(cookie_name, self.session_id, expires=-1, domain=cookie_domain)
+
+ def _generate_session_id(self):
+ """Generate a random id for session"""
+
+ while True:
+ rand = os.urandom(16)
+ now = time.time()
+ secret_key = self._config.secret_key
+ session_id = sha1("%s%s%s%s" %(rand, now, utils.safestr(web.ctx.ip), secret_key))
+ session_id = session_id.hexdigest()
+ if session_id not in self.store:
+ break
+ return session_id
+
+ def _valid_session_id(self, session_id):
+ rx = utils.re_compile('^[0-9a-fA-F]+$')
+ return rx.match(session_id)
+
+ def _cleanup(self):
+ """Cleanup the stored sessions"""
+ current_time = time.time()
+ timeout = self._config.timeout
+ if current_time - self._last_cleanup_time > timeout:
+ self.store.cleanup(timeout)
+ self.__dict__['_last_cleanup_time'] = current_time
+
+ def expired(self):
+        """Called when an expired session is accessed"""
+ self._killed = True
+ self._save()
+ raise SessionExpired(self._config.expired_message)
+
+ def kill(self):
+ """Kill the session, make it no longer available"""
+ del self.store[self.session_id]
+ self._killed = True
+
+class Store:
+ """Base class for session stores"""
+
+ def __contains__(self, key):
+ raise NotImplementedError
+
+ def __getitem__(self, key):
+ raise NotImplementedError
+
+ def __setitem__(self, key, value):
+ raise NotImplementedError
+
+ def cleanup(self, timeout):
+ """removes all the expired sessions"""
+ raise NotImplementedError
+
+ def encode(self, session_dict):
+ """encodes session dict as a string"""
+ pickled = pickle.dumps(session_dict)
+ return base64.encodestring(pickled)
+
+ def decode(self, session_data):
+ """decodes the data to get back the session dict """
+ pickled = base64.decodestring(session_data)
+ return pickle.loads(pickled)
+
+class DiskStore(Store):
+ """
+ Store for saving a session on disk.
+
+ >>> import tempfile
+ >>> root = tempfile.mkdtemp()
+ >>> s = DiskStore(root)
+ >>> s['a'] = 'foo'
+ >>> s['a']
+ 'foo'
+ >>> time.sleep(0.01)
+ >>> s.cleanup(0.01)
+ >>> s['a']
+ Traceback (most recent call last):
+ ...
+ KeyError: 'a'
+ """
+ def __init__(self, root):
+        # if the storage root doesn't exist, create it.
+ if not os.path.exists(root):
+ os.mkdir(root)
+ self.root = root
+
+ def _get_path(self, key):
+ if os.path.sep in key:
+ raise ValueError, "Bad key: %s" % repr(key)
+ return os.path.join(self.root, key)
+
+ def __contains__(self, key):
+ path = self._get_path(key)
+ return os.path.exists(path)
+
+ def __getitem__(self, key):
+ path = self._get_path(key)
+ if os.path.exists(path):
+ pickled = open(path).read()
+ return self.decode(pickled)
+ else:
+ raise KeyError, key
+
+ def __setitem__(self, key, value):
+ path = self._get_path(key)
+ pickled = self.encode(value)
+ try:
+ f = open(path, 'w')
+ try:
+ f.write(pickled)
+ finally:
+ f.close()
+ except IOError:
+ pass
+
+ def __delitem__(self, key):
+ path = self._get_path(key)
+ if os.path.exists(path):
+ os.remove(path)
+
+ def cleanup(self, timeout):
+ now = time.time()
+ for f in os.listdir(self.root):
+ path = self._get_path(f)
+ atime = os.stat(path).st_atime
+ if now - atime > timeout :
+ os.remove(path)
+
+class DBStore(Store):
+    """Store for saving a session in a database.
+ Needs a table with the following columns:
+
+ session_id CHAR(128) UNIQUE NOT NULL,
+ atime DATETIME NOT NULL default current_timestamp,
+ data TEXT
+ """
+ def __init__(self, db, table_name):
+ self.db = db
+ self.table = table_name
+
+ def __contains__(self, key):
+ data = self.db.select(self.table, where="session_id=$key", vars=locals())
+ return bool(list(data))
+
+ def __getitem__(self, key):
+ now = datetime.datetime.now()
+ try:
+ s = self.db.select(self.table, where="session_id=$key", vars=locals())[0]
+ self.db.update(self.table, where="session_id=$key", atime=now, vars=locals())
+ except IndexError:
+ raise KeyError
+ else:
+ return self.decode(s.data)
+
+ def __setitem__(self, key, value):
+ pickled = self.encode(value)
+ now = datetime.datetime.now()
+ if key in self:
+ self.db.update(self.table, where="session_id=$key", data=pickled, vars=locals())
+ else:
+ self.db.insert(self.table, False, session_id=key, data=pickled )
+
+ def __delitem__(self, key):
+ self.db.delete(self.table, where="session_id=$key", vars=locals())
+
+ def cleanup(self, timeout):
+ timeout = datetime.timedelta(timeout/(24.0*60*60)) #timedelta takes numdays as arg
+ last_allowed_time = datetime.datetime.now() - timeout
+ self.db.delete(self.table, where="$last_allowed_time > atime", vars=locals())
+
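+# A minimal sketch of wiring DBStore into a Session (the sqlite file, the
+# table name and the CREATE TABLE statement are assumptions for the example,
+# not part of this module; `app` is the application object):
+#
+#     import web
+#     db = web.database(dbn='sqlite', db='sessions.db')
+#     db.query("CREATE TABLE IF NOT EXISTS sessions ("
+#              " session_id CHAR(128) UNIQUE NOT NULL,"
+#              " atime TIMESTAMP NOT NULL DEFAULT current_timestamp,"
+#              " data TEXT)")
+#     session = Session(app, DBStore(db, 'sessions'), initializer={'count': 0})
+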
+class ShelfStore:
+ """Store for saving session using `shelve` module.
+
+ import shelve
+ store = ShelfStore(shelve.open('session.shelf'))
+
+ XXX: is shelve thread-safe?
+ """
+ def __init__(self, shelf):
+ self.shelf = shelf
+
+ def __contains__(self, key):
+ return key in self.shelf
+
+ def __getitem__(self, key):
+ atime, v = self.shelf[key]
+ self[key] = v # update atime
+ return v
+
+ def __setitem__(self, key, value):
+ self.shelf[key] = time.time(), value
+
+ def __delitem__(self, key):
+ try:
+ del self.shelf[key]
+ except KeyError:
+ pass
+
+ def cleanup(self, timeout):
+ now = time.time()
+ for k in self.shelf.keys():
+ atime, v = self.shelf[k]
+ if now - atime > timeout :
+ del self[k]
+
+if __name__ == '__main__' :
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/template.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,1412 @@
+"""
+simple, elegant templating
+(part of web.py)
+
+Template design:
+
+The template string is split into tokens and the tokens are combined into nodes.
+The parse tree is a nodelist. TextNode and ExpressionNode are simple nodes, while
+for-loop, if-loop etc. are block nodes, which contain multiple child nodes.
+
+Each node can emit some python string. The python string emitted by the
+root node is validated for safeeval and executed using python in the given environment.
+
+Care is taken to make sure the generated code and the template match line for line,
+so that error messages can point to the exact line number in the template. (This still
+doesn't work in some cases.)
+
+Grammar:
+
+ template -> defwith sections
+ defwith -> '$def with (' arguments ')' | ''
+ sections -> section*
+ section -> block | assignment | line
+
+ assignment -> '$ ' <assignment expression>
+ line -> (text|expr)*
+ text -> <any characters other than $>
+ expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
+ pyexpr -> <python expression>
+
+"""
+
+__all__ = [
+ "Template",
+ "Render", "render", "frender",
+ "ParseError", "SecurityError",
+ "test"
+]
+
+import tokenize
+import os
+import glob
+import re
+
+from utils import storage, safeunicode, safestr, re_compile
+from webapi import config
+from net import websafe
+
+def splitline(text):
+ r"""
+ Splits the given text at newline.
+
+ >>> splitline('foo\nbar')
+ ('foo\n', 'bar')
+ >>> splitline('foo')
+ ('foo', '')
+ >>> splitline('')
+ ('', '')
+ """
+ index = text.find('\n') + 1
+ if index:
+ return text[:index], text[index:]
+ else:
+ return text, ''
+
+class Parser:
+ """Parser Base.
+ """
+ def __init__(self, text, name="<template>"):
+ self.text = text
+ self.name = name
+
+ def parse(self):
+ text = self.text
+ defwith, text = self.read_defwith(text)
+ suite = self.read_suite(text)
+ return DefwithNode(defwith, suite)
+
+ def read_defwith(self, text):
+ if text.startswith('$def with'):
+ defwith, text = splitline(text)
+ defwith = defwith[1:].strip() # strip $ and spaces
+ return defwith, text
+ else:
+ return '', text
+
+ def read_section(self, text):
+ r"""Reads one section from the given text.
+
+ section -> block | assignment | line
+
+ >>> read_section = Parser('').read_section
+ >>> read_section('foo\nbar\n')
+ (<line: [t'foo\n']>, 'bar\n')
+ >>> read_section('$ a = b + 1\nfoo\n')
+ (<assignment: 'a = b + 1'>, 'foo\n')
+
+        read_section('$for i in range(10):\n    hello $i\nfoo')
+ """
+ if text.lstrip(' ').startswith('$'):
+ index = text.index('$')
+ begin_indent, text2 = text[:index], text[index+1:]
+ ahead = self.python_lookahead(text2)
+
+ if ahead == 'var':
+ return self.read_var(text2)
+ elif ahead in STATEMENT_NODES:
+ return self.read_block_section(text2, begin_indent)
+ elif ahead in KEYWORDS:
+ return self.read_keyword(text2)
+ elif ahead.strip() == '':
+                # an assignment starts with a space after $
+ # ex: $ a = b + 2
+ return self.read_assignment(text2)
+ return self.readline(text)
+
+ def read_var(self, text):
+ r"""Reads a var statement.
+
+ >>> read_var = Parser('').read_var
+ >>> read_var('var x=10\nfoo')
+ (<var: x = 10>, 'foo')
+ >>> read_var('var x: hello $name\nfoo')
+ (<var: x = join_('hello ', escape_(name, True))>, 'foo')
+ """
+ line, text = splitline(text)
+ tokens = self.python_tokens(line)
+ if len(tokens) < 4:
+ raise SyntaxError('Invalid var statement')
+
+ name = tokens[1]
+ sep = tokens[2]
+ value = line.split(sep, 1)[1].strip()
+
+ if sep == '=':
+ pass # no need to process value
+ elif sep == ':':
+            #@@ Hack for backward-compatibility
+ if tokens[3] == '\n': # multi-line var statement
+ block, text = self.read_indented_block(text, ' ')
+ lines = [self.readline(x)[0] for x in block.splitlines()]
+ nodes = []
+ for x in lines:
+ nodes.extend(x.nodes)
+ nodes.append(TextNode('\n'))
+ else: # single-line var statement
+ linenode, _ = self.readline(value)
+ nodes = linenode.nodes
+ parts = [node.emit('') for node in nodes]
+ value = "join_(%s)" % ", ".join(parts)
+ else:
+ raise SyntaxError('Invalid var statement')
+ return VarNode(name, value), text
+
+ def read_suite(self, text):
+ r"""Reads section by section till end of text.
+
+ >>> read_suite = Parser('').read_suite
+ >>> read_suite('hello $name\nfoo\n')
+ [<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
+ """
+ sections = []
+ while text:
+ section, text = self.read_section(text)
+ sections.append(section)
+ return SuiteNode(sections)
+
+ def readline(self, text):
+        r"""Reads one line from the text. The newline is suppressed if the line ends with \.
+
+ >>> readline = Parser('').readline
+ >>> readline('hello $name!\nbye!')
+ (<line: [t'hello ', $name, t'!\n']>, 'bye!')
+ >>> readline('hello $name!\\\nbye!')
+ (<line: [t'hello ', $name, t'!']>, 'bye!')
+ >>> readline('$f()\n\n')
+ (<line: [$f(), t'\n']>, '\n')
+ """
+ line, text = splitline(text)
+
+ # suppress newline if the line ends with \
+ if line.endswith('\\\n'):
+ line = line[:-2]
+
+ nodes = []
+ while line:
+ node, line = self.read_node(line)
+ nodes.append(node)
+
+ return LineNode(nodes), text
+
+ def read_node(self, text):
+ r"""Reads a node from the given text and returns the node and remaining text.
+
+ >>> read_node = Parser('').read_node
+ >>> read_node('hello $name')
+ (t'hello ', '$name')
+ >>> read_node('$name')
+ ($name, '')
+ """
+ if text.startswith('$$'):
+ return TextNode('$'), text[2:]
+ elif text.startswith('$#'): # comment
+ line, text = splitline(text)
+ return TextNode('\n'), text
+ elif text.startswith('$'):
+ text = text[1:] # strip $
+ if text.startswith(':'):
+ escape = False
+ text = text[1:] # strip :
+ else:
+ escape = True
+ return self.read_expr(text, escape=escape)
+ else:
+ return self.read_text(text)
+
+ def read_text(self, text):
+ r"""Reads a text node from the given text.
+
+ >>> read_text = Parser('').read_text
+ >>> read_text('hello $name')
+ (t'hello ', '$name')
+ """
+ index = text.find('$')
+ if index < 0:
+ return TextNode(text), ''
+ else:
+ return TextNode(text[:index]), text[index:]
+
+ def read_keyword(self, text):
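+ # Handles bare keywords ($pass, $break, $continue, $return),
+ # e.g. read_keyword('break\nfoo') returns (<code: 'break\n'>, 'foo')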
+ line, text = splitline(text)
+ return CodeNode(None, line.strip() + "\n"), text
+
+ def read_expr(self, text, escape=True):
+ """Reads a python expression from the text and returns the expression and remaining text.
+
+ expr -> simple_expr | paren_expr
+ simple_expr -> id extended_expr
+ extended_expr -> attr_access | paren_expr extended_expr | ''
+ attr_access -> dot id extended_expr
+ paren_expr -> [ tokens ] | ( tokens ) | { tokens }
+
+ >>> read_expr = Parser('').read_expr
+ >>> read_expr("name")
+ ($name, '')
+ >>> read_expr("a.b and c")
+ ($a.b, ' and c')
+ >>> read_expr("a. b")
+ ($a, '. b')
+ >>> read_expr("name</h1>")
+ ($name, '</h1>')
+ >>> read_expr("(limit)ing")
+ ($(limit), 'ing')
+ >>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
+ ($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
+ """
+ def simple_expr():
+ identifier()
+ extended_expr()
+
+ def identifier():
+ tokens.next()
+
+ def extended_expr():
+ lookahead = tokens.lookahead()
+ if lookahead is None:
+ return
+ elif lookahead.value == '.':
+ attr_access()
+ elif lookahead.value in parens:
+ paren_expr()
+ extended_expr()
+ else:
+ return
+
+ def attr_access():
+ from token import NAME # python token constants
+ dot = tokens.lookahead()
+ if tokens.lookahead2().type == NAME:
+ tokens.next() # consume dot
+ identifier()
+ extended_expr()
+
+ def paren_expr():
+ begin = tokens.next().value
+ end = parens[begin]
+ while True:
+ if tokens.lookahead().value in parens:
+ paren_expr()
+ else:
+ t = tokens.next()
+ if t.value == end:
+ break
+ return
+
+ parens = {
+ "(": ")",
+ "[": "]",
+ "{": "}"
+ }
+
+ def get_tokens(text):
+ """tokenize text using python tokenizer.
+ Python tokenizer ignores spaces, but they might be important in some cases.
+ This function introduces dummy space tokens when it identifies any ignored space.
+ Each token is a storage object containing type, value, begin and end.
+ """
+ readline = iter([text]).next
+ end = None
+ for t in tokenize.generate_tokens(readline):
+ t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
+ if end is not None and end != t.begin:
+ _, x1 = end
+ _, x2 = t.begin
+ yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
+ end = t.end
+ yield t
+
+ class BetterIter:
+ """Iterator like object with 2 support for 2 look aheads."""
+ def __init__(self, items):
+ self.iteritems = iter(items)
+ self.items = []
+ self.position = 0
+ self.current_item = None
+
+ def lookahead(self):
+ if len(self.items) <= self.position:
+ self.items.append(self._next())
+ return self.items[self.position]
+
+ def _next(self):
+ try:
+ return self.iteritems.next()
+ except StopIteration:
+ return None
+
+ def lookahead2(self):
+ if len(self.items) <= self.position+1:
+ self.items.append(self._next())
+ return self.items[self.position+1]
+
+ def next(self):
+ self.current_item = self.lookahead()
+ self.position += 1
+ return self.current_item
+
+ tokens = BetterIter(get_tokens(text))
+
+ if tokens.lookahead().value in parens:
+ paren_expr()
+ else:
+ simple_expr()
+ row, col = tokens.current_item.end
+ return ExpressionNode(text[:col], escape=escape), text[col:]
+
+ def read_assignment(self, text):
+ r"""Reads assignment statement from text.
+
+ >>> read_assignment = Parser('').read_assignment
+ >>> read_assignment('a = b + 1\nfoo')
+ (<assignment: 'a = b + 1'>, 'foo')
+ """
+ line, text = splitline(text)
+ return AssignmentNode(line.strip()), text
+
+ def python_lookahead(self, text):
+ """Returns the first python token from the given text.
+
+ >>> python_lookahead = Parser('').python_lookahead
+ >>> python_lookahead('for i in range(10):')
+ 'for'
+ >>> python_lookahead('else:')
+ 'else'
+ >>> python_lookahead(' x = 1')
+ ' '
+ """
+ readline = iter([text]).next
+ tokens = tokenize.generate_tokens(readline)
+ return tokens.next()[1]
+
+ def python_tokens(self, text):
+ readline = iter([text]).next
+ tokens = tokenize.generate_tokens(readline)
+ return [t[1] for t in tokens]
+
+ def read_indented_block(self, text, indent):
+ r"""Read a block of text. A block is what typically follows a for or it statement.
+ It can be in the same line as that of the statement or an indented block.
+
+ >>> read_indented_block = Parser('').read_indented_block
+ >>> read_indented_block(' a\n b\nc', ' ')
+ ('a\nb\n', 'c')
+ >>> read_indented_block(' a\n b\n c\nd', ' ')
+ ('a\n b\nc\n', 'd')
+ """
+ if indent == '':
+ return '', text
+
+ block = ""
+ while True:
+ if text.startswith(indent):
+ line, text = splitline(text)
+ block += line[len(indent):]
+ else:
+ break
+ return block, text
+
+ def read_statement(self, text):
+ r"""Reads a python statement.
+
+ >>> read_statement = Parser('').read_statement
+ >>> read_statement('for i in range(10): hello $name')
+ ('for i in range(10):', ' hello $name')
+ """
+ tok = PythonTokenizer(text)
+ tok.consume_till(':')
+ return text[:tok.index], text[tok.index:]
+
+ def read_block_section(self, text, begin_indent=''):
+ r"""
+ >>> read_block_section = Parser('').read_block_section
+ >>> read_block_section('for i in range(10): hello $i\nfoo')
+ (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
+ >>> read_block_section('for i in range(10):\n hello $i\n foo', begin_indent=' ')
+ (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, ' foo')
+ >>> read_block_section('for i in range(10):\n hello $i\nfoo')
+ (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
+ """
+ line, text = splitline(text)
+ stmt, line = self.read_statement(line)
+ keyword = self.python_lookahead(stmt)
+
+ # if there is something left in the line
+ if line.strip():
+ block = line.lstrip()
+ else:
+ def find_indent(text):
+ rx = re_compile(' +')
+ match = rx.match(text)
+ first_indent = match and match.group(0)
+ return first_indent or ""
+
+ # find the indentation of the block by looking at the first line
+ first_indent = find_indent(text)[len(begin_indent):]
+ indent = begin_indent + min(first_indent, INDENT)
+
+ block, text = self.read_indented_block(text, indent)
+
+ return self.create_block_node(keyword, stmt, block, begin_indent), text
+
+ def create_block_node(self, keyword, stmt, block, begin_indent):
+ if keyword in STATEMENT_NODES:
+ return STATEMENT_NODES[keyword](stmt, block, begin_indent)
+ else:
+ raise ParseError, 'Unknown statement: %s' % repr(keyword)
+
+class PythonTokenizer:
+ """Utility wrapper over python tokenizer."""
+ def __init__(self, text):
+ self.text = text
+ readline = iter([text]).next
+ self.tokens = tokenize.generate_tokens(readline)
+ self.index = 0
+
+ def consume_till(self, delim):
+ """Consumes tokens till colon.
+
+ >>> tok = PythonTokenizer('for i in range(10): hello $i')
+ >>> tok.consume_till(':')
+ >>> tok.text[:tok.index]
+ 'for i in range(10):'
+ >>> tok.text[tok.index:]
+ ' hello $i'
+ """
+ try:
+ while True:
+ t = self.next()
+ if t.value == delim:
+ break
+ elif t.value == '(':
+ self.consume_till(')')
+ elif t.value == '[':
+ self.consume_till(']')
+ elif t.value == '{':
+ self.consume_till('}')
+
+ # if end of line is found, it is an exception.
+ # Since there is no easy way to report the line number,
+ # leave the error reporting to the python parser later
+ #@@ This should be fixed.
+ if t.value == '\n':
+ break
+ except:
+ #raise ParseError, "Expected %s, found end of line." % repr(delim)
+
+ # raising ParseError doesn't show the line number.
+ # if this error is ignored, then it will be caught when compiling the python code.
+ return
+
+ def next(self):
+ type, t, begin, end, line = self.tokens.next()
+ row, col = end
+ self.index = col
+ return storage(type=type, value=t, begin=begin, end=end)
+
+class DefwithNode:
+ def __init__(self, defwith, suite):
+ if defwith:
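+ # e.g. 'def with (a, b)' becomes 'def __template__ (a, b):'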
+ self.defwith = defwith.replace('with', '__template__') + ':'
+ else:
+ self.defwith = 'def __template__():'
+ self.suite = suite
+
+ def emit(self, indent):
+ return self.defwith + self.suite.emit(indent + INDENT)
+
+ def __repr__(self):
+ return "<defwith: %s, %s>" % (self.defwith, self.nodes)
+
+class TextNode:
+ def __init__(self, value):
+ self.value = value
+
+ def emit(self, indent):
+ return repr(self.value)
+
+ def __repr__(self):
+ return 't' + repr(self.value)
+
+class ExpressionNode:
+ def __init__(self, value, escape=True):
+ self.value = value.strip()
+
+ # convert ${...} to $(...)
+ if value.startswith('{') and value.endswith('}'):
+ self.value = '(' + self.value[1:-1] + ')'
+
+ self.escape = escape
+
+ def emit(self, indent):
+ return 'escape_(%s, %s)' % (self.value, bool(self.escape))
+
+ def __repr__(self):
+ if self.escape:
+ escape = ''
+ else:
+ escape = ':'
+ return "$%s%s" % (escape, self.value)
+
+class AssignmentNode:
+ def __init__(self, code):
+ self.code = code
+
+ def emit(self, indent, begin_indent=''):
+ return indent + self.code + "\n"
+
+ def __repr__(self):
+ return "<assignment: %s>" % repr(self.code)
+
+class LineNode:
+ def __init__(self, nodes):
+ self.nodes = nodes
+
+ def emit(self, indent, text_indent='', name=''):
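+ # emits a yield of the joined parts, e.g.
+ # yield '', join_('hello ', escape_(name, True), '\n')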
+ text = [node.emit('') for node in self.nodes]
+ if text_indent:
+ text = [repr(text_indent)] + text
+ return indent + 'yield %s, join_(%s)\n' % (repr(name), ', '.join(text))
+
+ def __repr__(self):
+ return "<line: %s>" % repr(self.nodes)
+
+INDENT = ' ' # 4 spaces
+
+class BlockNode:
+ def __init__(self, stmt, block, begin_indent=''):
+ self.stmt = stmt
+ self.suite = Parser('').read_suite(block)
+ self.begin_indent = begin_indent
+
+ def emit(self, indent, text_indent=''):
+ text_indent = self.begin_indent + text_indent
+ out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
+ return out
+
+ def text(self):
+ #@@ Unused and incomplete: child nodes do not implement text().
+ return '${' + self.stmt + '}'
+
+ def __repr__(self):
+ return "<block: %s, %s>" % (repr(self.stmt), repr(self.nodelist))
+
+class ForNode(BlockNode):
+ def __init__(self, stmt, block, begin_indent=''):
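+ # Rewrite the for statement to route the iterable through loop.setup(),
+ # e.g. 'for i in items:' becomes 'for i in loop.setup(items):'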
+ self.original_stmt = stmt
+ tok = PythonTokenizer(stmt)
+ tok.consume_till('in')
+ a = stmt[:tok.index] # for i in
+ b = stmt[tok.index:-1] # rest of for stmt excluding :
+ stmt = a + ' loop.setup(' + b.strip() + '):'
+ BlockNode.__init__(self, stmt, block, begin_indent)
+
+ def __repr__(self):
+ return "<block: %s, %s>" % (repr(self.original_stmt), repr(self.suite))
+
+class CodeNode:
+ def __init__(self, stmt, block, begin_indent=''):
+ self.code = block
+
+ def emit(self, indent, text_indent=''):
+ import re
+ rx = re.compile('^', re.M)
+ return rx.sub(indent, self.code).rstrip(' ')
+
+ def __repr__(self):
+ return "<code: %s>" % repr(self.code)
+
+class IfNode(BlockNode):
+ pass
+
+class ElseNode(BlockNode):
+ pass
+
+class ElifNode(BlockNode):
+ pass
+
+class DefNode(BlockNode):
+ pass
+
+class VarNode:
+ def __init__(self, name, value):
+ self.name = name
+ self.value = value
+
+ def emit(self, indent, text_indent):
+ return indent + 'yield %s, %s\n' % (repr(self.name), self.value)
+
+ def __repr__(self):
+ return "<var: %s = %s>" % (self.name, self.value)
+
+class SuiteNode:
+ """Suite is a list of sections."""
+ def __init__(self, sections):
+ self.sections = sections
+
+ def emit(self, indent, text_indent=''):
+ return "\n" + "".join([s.emit(indent, text_indent) for s in self.sections])
+
+ def __repr__(self):
+ return repr(self.sections)
+
+STATEMENT_NODES = {
+ 'for': ForNode,
+ 'while': BlockNode,
+ 'if': IfNode,
+ 'elif': ElifNode,
+ 'else': ElseNode,
+ 'def': DefNode,
+ 'code': CodeNode
+}
+
+KEYWORDS = [
+ "pass",
+ "break",
+ "continue",
+ "return"
+]
+
+TEMPLATE_BUILTIN_NAMES = [
+ "dict", "enumerate", "float", "int", "bool", "list", "long", "reversed",
+ "set", "slice", "tuple", "xrange",
+ "abs", "all", "any", "callable", "chr", "cmp", "divmod", "filter", "hex",
+ "id", "isinstance", "iter", "len", "max", "min", "oct", "ord", "pow", "range",
+ "True", "False",
+ "None",
+ "__import__", # some c-libraries like datetime requires __import__ to present in the namespace
+]
+
+import __builtin__
+TEMPLATE_BUILTINS = dict([(name, getattr(__builtin__, name)) for name in TEMPLATE_BUILTIN_NAMES if name in __builtin__.__dict__])
+
+class ForLoop:
+ """
+ Wrapper for the expression in a for statement to support loop.xxx helpers.
+
+ >>> loop = ForLoop()
+ >>> for x in loop.setup(['a', 'b', 'c']):
+ ... print loop.index, loop.revindex, loop.parity, x
+ ...
+ 1 3 odd a
+ 2 2 even b
+ 3 1 odd c
+ >>> loop.index
+ Traceback (most recent call last):
+ ...
+ AttributeError: index
+ """
+ def __init__(self):
+ self._ctx = None
+
+ def __getattr__(self, name):
+ if self._ctx is None:
+ raise AttributeError, name
+ else:
+ return getattr(self._ctx, name)
+
+ def setup(self, seq):
+ self._push()
+ return self._ctx.setup(seq)
+
+ def _push(self):
+ self._ctx = ForLoopContext(self, self._ctx)
+
+ def _pop(self):
+ self._ctx = self._ctx.parent
+
+class ForLoopContext:
+ """Stackable context for ForLoop to support nested for loops.
+ """
+ def __init__(self, forloop, parent):
+ self._forloop = forloop
+ self.parent = parent
+
+ def setup(self, seq):
+ if hasattr(seq, '__len__'):
+ n = len(seq)
+ else:
+ n = 0
+
+ self.index = 0
+ seq = iter(seq)
+
+ # Python versions before 2.5 do not support yield inside try-except.
+ # This is a work-around for that limitation.
+ def next(seq):
+ try:
+ return seq.next()
+ except:
+ self._forloop._pop()
+ raise
+
+ while True:
+ self._next(self.index + 1, n)
+ yield next(seq)
+
+ def _next(self, i, n):
+ self.index = i
+ self.index0 = i - 1
+ self.first = (i == 1)
+ self.last = (i == n)
+ self.odd = (i % 2 == 1)
+ self.even = (i % 2 == 0)
+ self.parity = ['odd', 'even'][self.even]
+ if n:
+ self.length = n
+ self.revindex0 = n - i
+ self.revindex = self.revindex0 + 1
+
+class BaseTemplate:
+ def __init__(self, code, filename, filter, globals, builtins):
+ self.filename = filename
+ self.filter = filter
+ self._globals = globals
+ self._builtins = builtins
+ if code:
+ self.t = self._compile(code)
+ else:
+ self.t = lambda: ''
+
+ def _compile(self, code):
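+ # exec the generated code in a fresh environment and pull out
+ # the __template__ function it defines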
+ env = self.make_env(self._globals or {}, self._builtins)
+ exec(code, env)
+ return env['__template__']
+
+ def __call__(self, *a, **kw):
+ out = self.t(*a, **kw)
+ return self._join_output(out)
+
+ def _join_output(self, out):
+ d = TemplateResult()
+ data = []
+
+ for name, value in out:
+ if name:
+ d[name] = value
+ else:
+ data.append(value)
+
+ d.__body__ = u"".join(data)
+ return d
+
+ def make_env(self, globals, builtins):
+ return dict(globals,
+ __builtins__=builtins,
+ loop=ForLoop(),
+ escape_=self._escape,
+ join_=self._join
+ )
+
+ def _join(self, *items):
+ return u"".join([safeunicode(item) for item in items])
+
+ def _escape(self, value, escape=False):
+ import types
+ if value is None:
+ value = ''
+ elif isinstance(value, types.GeneratorType):
+ value = self._join_output(value)
+
+ value = safeunicode(value)
+ if escape and self.filter:
+ value = self.filter(value)
+ return value
+
+class Template(BaseTemplate):
+ CONTENT_TYPES = {
+ '.html' : 'text/html; charset=utf-8',
+ '.xhtml' : 'application/xhtml+xml; charset=utf-8',
+ '.txt' : 'text/plain',
+ }
+ FILTERS = {
+ '.html': websafe,
+ '.xhtml': websafe,
+ '.xml': websafe
+ }
+ globals = {}
+
+ def __init__(self, text, filename='<template>', filter=None, globals=None, builtins=None):
+ text = Template.normalize_text(text)
+ code = self.compile_template(text, filename)
+
+ _, ext = os.path.splitext(filename)
+ filter = filter or self.FILTERS.get(ext, None)
+ self.content_type = self.CONTENT_TYPES.get(ext, None)
+
+ if globals is None:
+ globals = self.globals
+ if builtins is None:
+ builtins = TEMPLATE_BUILTINS
+
+ BaseTemplate.__init__(self, code=code, filename=filename, filter=filter, globals=globals, builtins=builtins)
+
+ def normalize_text(text):
+ """Normalizes template text by correcting \r\n, tabs and BOM chars."""
+ text = text.replace('\r\n', '\n').replace('\r', '\n').expandtabs()
+ if not text.endswith('\n'):
+ text += '\n'
+
+ # ignore BOM chars at the beginning of the template
+ BOM = '\xef\xbb\xbf'
+ if isinstance(text, str) and text.startswith(BOM):
+ text = text[len(BOM):]
+
+ # support \$ for backward-compatibility
+ text = text.replace(r'\$', '$$')
+ return text
+ normalize_text = staticmethod(normalize_text)
+
+ def __call__(self, *a, **kw):
+ import webapi as web
+ if 'headers' in web.ctx and self.content_type:
+ web.header('Content-Type', self.content_type, unique=True)
+
+ return BaseTemplate.__call__(self, *a, **kw)
+
+ def generate_code(text, filename):
+ # parse the text
+ rootnode = Parser(text, filename).parse()
+
+ # generate python code from the parse tree
+ code = rootnode.emit(indent="").strip()
+ return safestr(code)
+
+ generate_code = staticmethod(generate_code)
+
+ def compile_template(self, template_string, filename):
+ code = Template.generate_code(template_string, filename)
+
+ def get_source_line(filename, lineno):
+ try:
+ lines = open(filename).read().splitlines()
+ return lines[lineno]
+ except:
+ return None
+
+ try:
+ # compile the code first to report the errors, if any, with the filename
+ compiled_code = compile(code, filename, 'exec')
+ except SyntaxError, e:
+ # display template line that caused the error along with the traceback.
+ try:
+ e.msg += '\n\nTemplate traceback:\n File %s, line %s\n %s' % \
+ (repr(e.filename), e.lineno, get_source_line(e.filename, e.lineno-1))
+ except:
+ pass
+ raise
+
+ # make sure code is safe
+ import compiler
+ ast = compiler.parse(code)
+ SafeVisitor().walk(ast, filename)
+
+ return compiled_code
+
+class CompiledTemplate(Template):
+ def __init__(self, f, filename):
+ Template.__init__(self, '', filename)
+ self.t = f
+
+ def compile_template(self, *a):
+ return None
+
+ def _compile(self, *a):
+ return None
+
+class Render:
+ """The most preferred way of using templates.
+
+ render = web.template.render('templates')
+ print render.foo()
+
+ The optional parameter `base` can be used to pass the output of
+ every template through a base template.
+
+ render = web.template.render('templates', base='layout')
+ """
+ def __init__(self, loc='templates', cache=None, base=None, **keywords):
+ self._loc = loc
+ self._keywords = keywords
+
+ if cache is None:
+ cache = not config.get('debug', False)
+
+ if cache:
+ self._cache = {}
+ else:
+ self._cache = None
+
+ if base and not hasattr(base, '__call__'):
+ # make base a function, so that it can be passed to sub-renders
+ self._base = lambda page: self._template(base)(page)
+ else:
+ self._base = base
+
+ def _lookup(self, name):
+ path = os.path.join(self._loc, name)
+ if os.path.isdir(path):
+ return 'dir', path
+ else:
+ path = self._findfile(path)
+ if path:
+ return 'file', path
+ else:
+ return 'none', None
+
+ def _load_template(self, name):
+ kind, path = self._lookup(name)
+
+ if kind == 'dir':
+ return Render(path, cache=self._cache is not None, base=self._base, **self._keywords)
+ elif kind == 'file':
+ return Template(open(path).read(), filename=path, **self._keywords)
+ else:
+ raise AttributeError, "No template named " + name
+
+ def _findfile(self, path_prefix):
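+ # e.g. 'templates/foo' may resolve to 'templates/foo.html'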
+ p = [f for f in glob.glob(path_prefix + '.*') if not f.endswith('~')] # skip backup files
+ return p and p[0]
+
+ def _template(self, name):
+ if self._cache is not None:
+ if name not in self._cache:
+ self._cache[name] = self._load_template(name)
+ return self._cache[name]
+ else:
+ return self._load_template(name)
+
+ def __getattr__(self, name):
+ t = self._template(name)
+ if self._base and isinstance(t, Template):
+ def template(*a, **kw):
+ return self._base(t(*a, **kw))
+ return template
+ else:
+ return self._template(name)
+
+class GAE_Render(Render):
+ # Render gets over-written; keep a copy here.
+ super = Render
+ def __init__(self, loc, *a, **kw):
+ GAE_Render.super.__init__(self, loc, *a, **kw)
+
+ import types
+ if isinstance(loc, types.ModuleType):
+ self.mod = loc
+ else:
+ name = loc.rstrip('/').replace('/', '.')
+ self.mod = __import__(name, None, None, ['x'])
+
+ self.mod.__dict__.update(kw.get('builtins', TEMPLATE_BUILTINS))
+ self.mod.__dict__.update(Template.globals)
+ self.mod.__dict__.update(kw.get('globals', {}))
+
+ def _load_template(self, name):
+ t = getattr(self.mod, name)
+ import types
+ if isinstance(t, types.ModuleType):
+ return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
+ else:
+ return t
+
+render = Render
+# setup render for Google App Engine.
+try:
+ from google import appengine
+ render = Render = GAE_Render
+except ImportError:
+ pass
+
+def frender(path, **keywords):
+ """Creates a template from the given file path.
+ """
+ return Template(open(path).read(), filename=path, **keywords)
+
+def compile_templates(root):
+ """Compiles templates to python code."""
+ re_start = re_compile('^', re.M)
+
+ for dirpath, dirnames, filenames in os.walk(root):
+ filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.startswith('__init__.py')]
+
+ for d in dirnames[:]:
+ if d.startswith('.'):
+ dirnames.remove(d) # don't visit this dir
+
+ out = open(os.path.join(dirpath, '__init__.py'), 'w')
+ out.write('from web.template import CompiledTemplate, ForLoop\n\n')
+ if dirnames:
+ out.write("import " + ", ".join(dirnames))
+
+ for f in filenames:
+ path = os.path.join(dirpath, f)
+
+ if '.' in f:
+ name, _ = f.split('.', 1)
+ else:
+ name = f
+
+ text = open(path).read()
+ text = Template.normalize_text(text)
+ code = Template.generate_code(text, path)
+ code = re_start.sub(' ', code)
+
+ _gen = '' + \
+ '\ndef %s():' + \
+ '\n loop = ForLoop()' + \
+ '\n _dummy = CompiledTemplate(lambda: None, "dummy")' + \
+ '\n join_ = _dummy._join' + \
+ '\n escape_ = _dummy._escape' + \
+ '\n' + \
+ '\n%s' + \
+ '\n return __template__'
+
+ gen_code = _gen % (name, code)
+ out.write(gen_code)
+ out.write('\n\n')
+ out.write('%s = CompiledTemplate(%s(), %s)\n\n' % (name, name, repr(path)))
+
+ # create template to make sure it compiles
+ t = Template(open(path).read(), path)
+ out.close()
+
+class ParseError(Exception):
+ pass
+
+class SecurityError(Exception):
+ """The template seems to be trying to do something naughty."""
+ pass
+
+# Enumerate all the allowed AST nodes
+ALLOWED_AST_NODES = [
+ "Add", "And",
+# "AssAttr",
+ "AssList", "AssName", "AssTuple",
+# "Assert",
+ "Assign", "AugAssign",
+# "Backquote",
+ "Bitand", "Bitor", "Bitxor", "Break",
+ "CallFunc","Class", "Compare", "Const", "Continue",
+ "Decorators", "Dict", "Discard", "Div",
+ "Ellipsis", "EmptyNode",
+# "Exec",
+ "Expression", "FloorDiv", "For",
+# "From",
+ "Function",
+ "GenExpr", "GenExprFor", "GenExprIf", "GenExprInner",
+ "Getattr",
+# "Global",
+ "If", "IfExp",
+# "Import",
+ "Invert", "Keyword", "Lambda", "LeftShift",
+ "List", "ListComp", "ListCompFor", "ListCompIf", "Mod",
+ "Module",
+ "Mul", "Name", "Not", "Or", "Pass", "Power",
+# "Print", "Printnl", "Raise",
+ "Return", "RightShift", "Slice", "Sliceobj",
+ "Stmt", "Sub", "Subscript",
+# "TryExcept", "TryFinally",
+ "Tuple", "UnaryAdd", "UnarySub",
+ "While", "With", "Yield",
+]
+
+class SafeVisitor(object):
+ """
+ Make sure code is safe by walking through the AST.
+
+ Code considered unsafe if:
+ * it has restricted AST nodes
+ * it is trying to access restricted attributes
+
+ Adapted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
+ """
+ def __init__(self):
+ "Initialize visitor by generating callbacks for all AST node types."
+ self.errors = []
+
+ def walk(self, ast, filename):
+ "Validate each node in AST and raise SecurityError if the code is not safe."
+ self.filename = filename
+ self.visit(ast)
+
+ if self.errors:
+ raise SecurityError, '\n'.join([str(err) for err in self.errors])
+
+ def visit(self, node, *args):
+ "Recursively validate node and all of its children."
+ def classname(obj):
+ return obj.__class__.__name__
+ nodename = classname(node)
+ fn = getattr(self, 'visit' + nodename, None)
+
+ if fn:
+ fn(node, *args)
+ else:
+ if nodename not in ALLOWED_AST_NODES:
+ self.fail(node, *args)
+
+ for child in node.getChildNodes():
+ self.visit(child, *args)
+
+ def visitName(self, node, *args):
+ "Disallow any attempts to access a restricted attr."
+ #self.assert_attr(node.getChildren()[0], node)
+ pass
+
+ def visitGetattr(self, node, *args):
+ "Disallow any attempts to access a restricted attribute."
+ self.assert_attr(node.attrname, node)
+
+ def assert_attr(self, attrname, node):
+ if self.is_unallowed_attr(attrname):
+ lineno = self.get_node_lineno(node)
+ e = SecurityError("%s:%d - access to attribute '%s' is denied" % (self.filename, lineno, attrname))
+ self.errors.append(e)
+
+ def is_unallowed_attr(self, name):
+ return name.startswith('_') \
+ or name.startswith('func_') \
+ or name.startswith('im_')
+
+ def get_node_lineno(self, node):
+ return (node.lineno) and node.lineno or 0
+
+ def fail(self, node, *args):
+ "Default callback for unallowed AST nodes."
+ lineno = self.get_node_lineno(node)
+ nodename = node.__class__.__name__
+ e = SecurityError("%s:%d - execution of '%s' statements is denied" % (self.filename, lineno, nodename))
+ self.errors.append(e)
+
+class TemplateResult(storage):
+ """Dictionary like object for storing template output.
+
+ A template can specify key-value pairs in the output using
+ `var` statements. Each `var` statement adds a new key to the
+ template output and the main output is stored with key
+ __body__.
+
+ >>> d = TemplateResult(__body__='hello, world', x='foo')
+ >>> d
+ <TemplateResult: {'__body__': 'hello, world', 'x': 'foo'}>
+ >>> print d
+ hello, world
+ """
+ def __unicode__(self):
+ return safeunicode(self.get('__body__', ''))
+
+ def __str__(self):
+ return safestr(self.get('__body__', ''))
+
+ def __repr__(self):
+ return "<TemplateResult: %s>" % dict.__repr__(self)
+
+def test():
+ r"""Doctest for testing template module.
+
+ Define a utility function to run template test.
+
+ >>> class TestResult(TemplateResult):
+ ... def __repr__(self): return repr(unicode(self))
+ ...
+ >>> def t(code, **keywords):
+ ... tmpl = Template(code, **keywords)
+ ... return lambda *a, **kw: TestResult(tmpl(*a, **kw))
+ ...
+
+ Simple tests.
+
+ >>> t('1')()
+ u'1\n'
+ >>> t('$def with ()\n1')()
+ u'1\n'
+ >>> t('$def with (a)\n$a')(1)
+ u'1\n'
+ >>> t('$def with (a=0)\n$a')(1)
+ u'1\n'
+ >>> t('$def with (a=0)\n$a')(a=1)
+ u'1\n'
+
+ Test complicated expressions.
+
+ >>> t('$def with (x)\n$x.upper()')('hello')
+ u'HELLO\n'
+ >>> t('$(2 * 3 + 4 * 5)')()
+ u'26\n'
+ >>> t('${2 * 3 + 4 * 5}')()
+ u'26\n'
+ >>> t('$def with (limit)\nkeep $(limit)ing.')('go')
+ u'keep going.\n'
+ >>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
+ u'1\n'
+
+ Test html escaping.
+
+ >>> t('$def with (x)\n$x', filename='a.html')('<html>')
+ u'<html>\n'
+ >>> t('$def with (x)\n$x', filename='a.txt')('<html>')
+ u'<html>\n'
+
+ Test if, for and while.
+
+ >>> t('$if 1: 1')()
+ u'1\n'
+ >>> t('$if 1:\n 1')()
+ u'1\n'
+ >>> t('$if 1:\n 1\\')()
+ u'1'
+ >>> t('$if 0: 0\n$elif 1: 1')()
+ u'1\n'
+ >>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
+ u'1\n'
+ >>> t('$if 0 < 1 and 1 < 2: 1')()
+ u'1\n'
+ >>> t('$for x in [1, 2, 3]: $x')()
+ u'1\n2\n3\n'
+ >>> t('$def with (d)\n$for k, v in d.iteritems(): $k')({1: 1})
+ u'1\n'
+ >>> t('$for x in [1, 2, 3]:\n\t$x')()
+ u' 1\n 2\n 3\n'
+ >>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
+ u'1\n1\n1\n'
+
+ The space after : must be ignored.
+
+ >>> t('$if True: foo')()
+ u'foo\n'
+
+ Test loop.xxx.
+
+ >>> t("$for i in range(5):$loop.index, $loop.parity")()
+ u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
+ >>> t("$for i in range(2):\n $for j in range(2):$loop.parent.parity $loop.parity")()
+ u'odd odd\nodd even\neven odd\neven even\n'
+
+ Test assignment.
+
+ >>> t('$ a = 1\n$a')()
+ u'1\n'
+ >>> t('$ a = [1]\n$a[0]')()
+ u'1\n'
+ >>> t('$ a = {1: 1}\n$a.keys()[0]')()
+ u'1\n'
+ >>> t('$ a = []\n$if not a: 1')()
+ u'1\n'
+ >>> t('$ a = {}\n$if not a: 1')()
+ u'1\n'
+ >>> t('$ a = -1\n$a')()
+ u'-1\n'
+ >>> t('$ a = "1"\n$a')()
+ u'1\n'
+
+ Test comments.
+
+ >>> t('$# 0')()
+ u'\n'
+ >>> t('hello$#comment1\nhello$#comment2')()
+ u'hello\nhello\n'
+ >>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
+ u'\nhello\nhello\n'
+
+ Test unicode.
+
+ >>> t('$def with (a)\n$a')(u'\u203d')
+ u'\u203d\n'
+ >>> t('$def with (a)\n$a')(u'\u203d'.encode('utf-8'))
+ u'\u203d\n'
+ >>> t(u'$def with (a)\n$a $:a')(u'\u203d')
+ u'\u203d \u203d\n'
+ >>> t(u'$def with ()\nfoo')()
+ u'foo\n'
+ >>> def f(x): return x
+ ...
+ >>> t(u'$def with (f)\n$:f("x")')(f)
+ u'x\n'
+ >>> t('$def with (f)\n$:f("x")')(f)
+ u'x\n'
+
+ Test dollar escaping.
+
+ >>> t("Stop, $$money isn't evaluated.")()
+ u"Stop, $money isn't evaluated.\n"
+ >>> t("Stop, \$money isn't evaluated.")()
+ u"Stop, $money isn't evaluated.\n"
+
+ Test space sensitivity.
+
+ >>> t('$def with (x)\n$x')(1)
+ u'1\n'
+ >>> t('$def with(x ,y)\n$x')(1, 1)
+ u'1\n'
+ >>> t('$(1 + 2*3 + 4)')()
+ u'11\n'
+
+ Make sure globals are working.
+
+ >>> t('$x')()
+ Traceback (most recent call last):
+ ...
+ NameError: global name 'x' is not defined
+ >>> t('$x', globals={'x': 1})()
+ u'1\n'
+
+ Can't change globals.
+
+ >>> t('$ x = 2\n$x', globals={'x': 1})()
+ u'2\n'
+ >>> t('$ x = x + 1\n$x', globals={'x': 1})()
+ Traceback (most recent call last):
+ ...
+ UnboundLocalError: local variable 'x' referenced before assignment
+
+ Make sure builtins are customizable.
+
+ >>> t('$min(1, 2)')()
+ u'1\n'
+ >>> t('$min(1, 2)', builtins={})()
+ Traceback (most recent call last):
+ ...
+ NameError: global name 'min' is not defined
+
+ Test vars.
+
+ >>> x = t('$var x: 1')()
+ >>> x.x
+ u'1'
+ >>> x = t('$var x = 1')()
+ >>> x.x
+ 1
+ >>> x = t('$var x: \n foo\n bar')()
+ >>> x.x
+ u'foo\nbar\n'
+
+ Test BOM chars.
+
+ >>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
+ u'foo\n'
+
+ Test for with weird cases.
+
+ >>> t('$for i in range(10)[1:5]:\n $i')()
+ u'1\n2\n3\n4\n'
+ >>> t("$for k, v in {'a': 1, 'b': 2}.items():\n $k $v")()
+ u'a 1\nb 2\n'
+ >>> t("$for k, v in ({'a': 1, 'b': 2}.items():\n $k $v")()
+ Traceback (most recent call last):
+ ...
+ SyntaxError: invalid syntax
+
+ Test datetime.
+
+ >>> import datetime
+ >>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
+ u'01 2009\n'
+ """
+ pass
+
+if __name__ == "__main__":
+ import sys
+ if '--compile' in sys.argv:
+ compile_templates(sys.argv[2])
+ else:
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/test.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,51 @@
+"""test utilities
+(part of web.py)
+"""
+import unittest
+import sys, os
+import web
+
+TestCase = unittest.TestCase
+TestSuite = unittest.TestSuite
+
+def load_modules(names):
+ return [__import__(name, None, None, "x") for name in names]
+
+def module_suite(module, classnames=None):
+ """Makes a suite from a module."""
+ if classnames:
+ return unittest.TestLoader().loadTestsFromNames(classnames, module)
+ elif hasattr(module, 'suite'):
+ return module.suite()
+ else:
+ return unittest.TestLoader().loadTestsFromModule(module)
+
+def doctest_suite(module_names):
+ """Makes a test suite from doctests."""
+ import doctest
+ suite = TestSuite()
+ for mod in load_modules(module_names):
+ suite.addTest(doctest.DocTestSuite(mod))
+ return suite
+
+def suite(module_names):
+ """Creates a suite from multiple modules."""
+ suite = TestSuite()
+ for mod in load_modules(module_names):
+ suite.addTest(module_suite(mod))
+ return suite
+
+def runTests(suite):
+ runner = unittest.TextTestRunner()
+ return runner.run(suite)
+
+def main(suite=None):
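+ # Typical usage in a test module (assumed): run all tests defined in
+ # __main__ with `if __name__ == "__main__": web.test.main()`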
+ if not suite:
+ main_module = __import__('__main__')
+ # allow command line switches
+ args = [a for a in sys.argv[1:] if not a.startswith('-')]
+ suite = module_suite(main_module, args or None)
+
+ result = runTests(suite)
+ sys.exit(not result.wasSuccessful())
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/utils.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,1111 @@
+#!/usr/bin/env python
+"""
+General Utilities
+(part of web.py)
+"""
+
+__all__ = [
+ "Storage", "storage", "storify",
+ "iters",
+ "rstrips", "lstrips", "strips",
+ "safeunicode", "safestr", "utf8",
+ "TimeoutError", "timelimit",
+ "Memoize", "memoize",
+ "re_compile", "re_subm",
+ "group", "uniq", "iterview",
+ "IterBetter", "iterbetter",
+ "dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
+ "listget", "intget", "datestr",
+ "numify", "denumify", "commify", "dateify",
+ "nthstr",
+ "CaptureStdout", "capturestdout", "Profile", "profile",
+ "tryall",
+ "ThreadedDict", "threadeddict",
+ "autoassign",
+ "to36",
+ "safemarkdown",
+ "sendmail"
+]
+
+import re, sys, time, threading, itertools
+
+try:
+ import subprocess
+except ImportError:
+ subprocess = None
+
+try: import datetime
+except ImportError: pass
+
+try: set
+except NameError:
+ from sets import Set as set
+
+class Storage(dict):
+ """
+ A Storage object is like a dictionary except `obj.foo` can be used
+ in addition to `obj['foo']`.
+
+ >>> o = storage(a=1)
+ >>> o.a
+ 1
+ >>> o['a']
+ 1
+ >>> o.a = 2
+ >>> o['a']
+ 2
+ >>> del o.a
+ >>> o.a
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'a'
+
+ """
+ def __getattr__(self, key):
+ try:
+ return self[key]
+ except KeyError, k:
+ raise AttributeError, k
+
+ def __setattr__(self, key, value):
+ self[key] = value
+
+ def __delattr__(self, key):
+ try:
+ del self[key]
+ except KeyError, k:
+ raise AttributeError, k
+
+ def __repr__(self):
+ return '<Storage ' + dict.__repr__(self) + '>'
+
+storage = Storage
+
+def storify(mapping, *requireds, **defaults):
+ """
+ Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
+ `mapping` doesn't have all of the keys in `requireds` and using the default
+ values for keys found in `defaults`.
+
+ For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
+ `storage({'a':1, 'b':2, 'c':3})`.
+
+ If a `storify` value is a list (e.g. multiple values in a form submission),
+ `storify` returns the last element of the list, unless the key appears in
+ `defaults` as a list. Thus:
+
+ >>> storify({'a':[1, 2]}).a
+ 2
+ >>> storify({'a':[1, 2]}, a=[]).a
+ [1, 2]
+ >>> storify({'a':1}, a=[]).a
+ [1]
+ >>> storify({}, a=[]).a
+ []
+
+ Similarly, if the value has a `value` attribute, `storify` will return _its_
+ value, unless the key appears in `defaults` as a dictionary.
+
+ >>> storify({'a':storage(value=1)}).a
+ 1
+ >>> storify({'a':storage(value=1)}, a={}).a
+ <Storage {'value': 1}>
+ >>> storify({}, a={}).a
+ {}
+
+ Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
+
+ >>> storify({'x': 'a'}, _unicode=True)
+ <Storage {'x': u'a'}>
+ >>> storify({'x': storage(value='a')}, x={}, _unicode=True)
+ <Storage {'x': <Storage {'value': 'a'}>}>
+ >>> storify({'x': storage(value='a')}, _unicode=True)
+ <Storage {'x': u'a'}>
+ """
+ _unicode = defaults.pop('_unicode', False)
+ def unicodify(s):
+ if _unicode and isinstance(s, str): return safeunicode(s)
+ else: return s
+
+ def getvalue(x):
+ if hasattr(x, 'value'):
+ return unicodify(x.value)
+ else:
+ return unicodify(x)
+
+ stor = Storage()
+ for key in requireds + tuple(mapping.keys()):
+ value = mapping[key]
+ if isinstance(value, list):
+ if isinstance(defaults.get(key), list):
+ value = [getvalue(x) for x in value]
+ else:
+ value = value[-1]
+ if not isinstance(defaults.get(key), dict):
+ value = getvalue(value)
+ if isinstance(defaults.get(key), list) and not isinstance(value, list):
+ value = [value]
+ setattr(stor, key, value)
+
+ for (key, value) in defaults.iteritems():
+ result = value
+ if hasattr(stor, key):
+ result = stor[key]
+ if value == () and not isinstance(result, tuple):
+ result = (result,)
+ setattr(stor, key, result)
+
+ return stor
+
+iters = [list, tuple]
+import __builtin__
+if hasattr(__builtin__, 'set'):
+ iters.append(set)
+if hasattr(__builtin__, 'frozenset'):
+ iters.append(frozenset)
+if sys.version_info < (2,6): # sets module deprecated in 2.6
+ try:
+ from sets import Set
+ iters.append(Set)
+ except ImportError:
+ pass
+
+class _hack(tuple): pass
+iters = _hack(iters)
+iters.__doc__ = """
+A list of iterable items (like lists, but not strings). Includes whichever
+of lists, tuples, sets, and Sets are available in this version of Python.
+"""
+
+def _strips(direction, text, remove):
+ if direction == 'l':
+ if text.startswith(remove):
+ return text[len(remove):]
+ elif direction == 'r':
+ if text.endswith(remove):
+ return text[:-len(remove)]
+ else:
+ raise ValueError, "Direction needs to be r or l."
+ return text
+
+def rstrips(text, remove):
+ """
+ removes the string `remove` from the right of `text`
+
+ >>> rstrips("foobar", "bar")
+ 'foo'
+
+ """
+ return _strips('r', text, remove)
+
+def lstrips(text, remove):
+ """
+ removes the string `remove` from the left of `text`
+
+ >>> lstrips("foobar", "foo")
+ 'bar'
+
+ """
+ return _strips('l', text, remove)
+
+def strips(text, remove):
+ """
+ removes the string `remove` from both sides of `text`
+
+ >>> strips("foobarfoo", "foo")
+ 'bar'
+
+ """
+ return rstrips(lstrips(text, remove), remove)
+
+def safeunicode(obj, encoding='utf-8'):
+ r"""
+ Converts any given object to a unicode string.
+
+ >>> safeunicode('hello')
+ u'hello'
+ >>> safeunicode(2)
+ u'2'
+ >>> safeunicode('\xe1\x88\xb4')
+ u'\u1234'
+ """
+ if isinstance(obj, unicode):
+ return obj
+ elif isinstance(obj, str):
+ return obj.decode(encoding)
+ else:
+ if hasattr(obj, '__unicode__'):
+ return unicode(obj)
+ else:
+ return str(obj).decode(encoding)
+
+def safestr(obj, encoding='utf-8'):
+ r"""
+ Converts any given object to a utf-8 encoded string.
+
+ >>> safestr('hello')
+ 'hello'
+ >>> safestr(u'\u1234')
+ '\xe1\x88\xb4'
+ >>> safestr(2)
+ '2'
+ """
+ if isinstance(obj, unicode):
+ return obj.encode('utf-8')
+ elif isinstance(obj, str):
+ return obj
+ elif hasattr(obj, 'next') and hasattr(obj, '__iter__'): # iterator
+ return itertools.imap(safestr, obj)
+ else:
+ return str(obj)
+
+# for backward-compatibility
+utf8 = safestr
+
+class TimeoutError(Exception): pass
+def timelimit(timeout):
+ """
+ A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
+ if it takes longer.
+
+ >>> import time
+ >>> def meaningoflife():
+ ... time.sleep(.2)
+ ... return 42
+ >>>
+ >>> timelimit(.1)(meaningoflife)()
+ Traceback (most recent call last):
+ ...
+ TimeoutError: took too long
+ >>> timelimit(1)(meaningoflife)()
+ 42
+
+ _Caveat:_ The function isn't stopped after `timeout` seconds but continues
+ executing in a separate thread. (There seems to be no way to kill a thread.)
+
+ inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
+ """
+ def _1(function):
+ def _2(*args, **kw):
+ class Dispatch(threading.Thread):
+ def __init__(self):
+ threading.Thread.__init__(self)
+ self.result = None
+ self.error = None
+
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ try:
+ self.result = function(*args, **kw)
+ except:
+ self.error = sys.exc_info()
+
+ c = Dispatch()
+ c.join(timeout)
+ if c.isAlive():
+ raise TimeoutError, 'took too long'
+ if c.error:
+ raise c.error[0], c.error[1]
+ return c.result
+ return _2
+ return _1
+
+class Memoize:
+ """
+ 'Memoizes' a function, caching its return values for each input.
+
+ >>> import time
+ >>> def meaningoflife():
+ ... time.sleep(.2)
+ ... return 42
+ >>> fastlife = memoize(meaningoflife)
+ >>> meaningoflife()
+ 42
+ >>> timelimit(.1)(meaningoflife)()
+ Traceback (most recent call last):
+ ...
+ TimeoutError: took too long
+ >>> fastlife()
+ 42
+ >>> timelimit(.1)(fastlife)()
+ 42
+
+ """
+ def __init__(self, func):
+ self.func = func
+ self.cache = {}
+ def __call__(self, *args, **keywords):
+ key = (args, tuple(keywords.items()))
+ if key not in self.cache:
+ self.cache[key] = self.func(*args, **keywords)
+ return self.cache[key]
+
+memoize = Memoize
+
+re_compile = memoize(re.compile) #@@ threadsafe?
+re_compile.__doc__ = """
+A memoized version of re.compile.
+"""
+
+class _re_subm_proxy:
+ def __init__(self):
+ self.match = None
+ def __call__(self, match):
+ self.match = match
+ return ''
+
+def re_subm(pat, repl, string):
+ """
+ Like re.sub, but returns the replacement _and_ the match object.
+
+ >>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
+ >>> t
+ 'foooooolish'
+ >>> m.groups()
+ ('oooooo',)
+ """
+ compiled_pat = re_compile(pat)
+ proxy = _re_subm_proxy()
+ compiled_pat.sub(proxy.__call__, string)
+ return compiled_pat.sub(repl, string), proxy.match
+
+def group(seq, size):
+ """
+ Returns an iterator over a series of lists of length `size` from the iterable `seq`.
+
+ >>> list(group([1,2,3,4], 2))
+ [[1, 2], [3, 4]]
+ """
+ if not hasattr(seq, 'next'):
+ seq = iter(seq)
+ while True:
+ yield [seq.next() for i in xrange(size)]
+
+def uniq(seq):
+ """
+ Removes duplicate elements from a list.
+
+ >>> uniq([1,2,3,1,4,5,6])
+ [1, 2, 3, 4, 5, 6]
+ """
+ seen = set()
+ result = []
+ for item in seq:
+ if item in seen: continue
+ seen.add(item)
+ result.append(item)
+ return result
+
+def iterview(x):
+ """
+ Takes an iterable `x` and returns an iterator over it
+ which prints its progress to stderr as it iterates through.
+ """
+ WIDTH = 70
+
+ def plainformat(n, lenx):
+ return '%5.1f%% (%*d/%d)' % ((float(n)/lenx)*100, len(str(lenx)), n, lenx)
+
+ def bars(size, n, lenx):
+ val = int((float(n)*size)/lenx + 0.5)
+ if size - val:
+ spacing = ">" + (" "*(size-val))[1:]
+ else:
+ spacing = ""
+ return "[%s%s]" % ("="*val, spacing)
+
+ def eta(elapsed, n, lenx):
+ if n == 0:
+ return '--:--:--'
+ if n == lenx:
+ secs = int(elapsed)
+ else:
+ secs = int((elapsed/n) * (lenx-n))
+ mins, secs = divmod(secs, 60)
+ hrs, mins = divmod(mins, 60)
+
+ return '%02d:%02d:%02d' % (hrs, mins, secs)
+
+ def format(starttime, n, lenx):
+ out = plainformat(n, lenx) + ' '
+ if n == lenx:
+ end = ' '
+ else:
+ end = ' ETA '
+ end += eta(time.time() - starttime, n, lenx)
+ out += bars(WIDTH - len(out) - len(end), n, lenx)
+ out += end
+ return out
+
+ starttime = time.time()
+ lenx = len(x)
+ for n, y in enumerate(x):
+ sys.stderr.write('\r' + format(starttime, n, lenx))
+ yield y
+ sys.stderr.write('\r' + format(starttime, n+1, lenx) + '\n')
+
+class IterBetter:
+ """
+ Returns an object that can be used as an iterator
+ but can also be used via __getitem__ (although it
+ cannot go backwards -- that is, you cannot request
+ `iterbetter[0]` after requesting `iterbetter[1]`).
+
+ >>> import itertools
+ >>> c = iterbetter(itertools.count())
+ >>> c[1]
+ 1
+ >>> c[5]
+ 5
+ >>> c[3]
+ Traceback (most recent call last):
+ ...
+ IndexError: already passed 3
+ """
+ def __init__(self, iterator):
+ self.i, self.c = iterator, 0
+ def __iter__(self):
+ while 1:
+ yield self.i.next()
+ self.c += 1
+ def __getitem__(self, i):
+ #todo: slices
+ if i < self.c:
+ raise IndexError, "already passed "+str(i)
+ try:
+ while i > self.c:
+ self.i.next()
+ self.c += 1
+ # now self.c == i
+ self.c += 1
+ return self.i.next()
+ except StopIteration:
+ raise IndexError, str(i)
+iterbetter = IterBetter
+
+def dictreverse(mapping):
+ """
+ Returns a new dictionary with keys and values swapped.
+
+ >>> dictreverse({1: 2, 3: 4})
+ {2: 1, 4: 3}
+ """
+ return dict([(value, key) for (key, value) in mapping.iteritems()])
+
+def dictfind(dictionary, element):
+ """
+ Returns a key whose value in `dictionary` is `element`
+ or, if none exists, None.
+
+ >>> d = {1:2, 3:4}
+ >>> dictfind(d, 4)
+ 3
+ >>> dictfind(d, 5)
+ """
+ for (key, value) in dictionary.iteritems():
+ if element is value:
+ return key
+
+def dictfindall(dictionary, element):
+ """
+ Returns the keys whose values in `dictionary` are `element`
+ or, if none exists, [].
+
+ >>> d = {1:4, 3:4}
+ >>> dictfindall(d, 4)
+ [1, 3]
+ >>> dictfindall(d, 5)
+ []
+ """
+ res = []
+ for (key, value) in dictionary.iteritems():
+ if element is value:
+ res.append(key)
+ return res
+
+def dictincr(dictionary, element):
+ """
+ Increments `element` in `dictionary`,
+ setting it to one if it doesn't exist.
+
+ >>> d = {1:2, 3:4}
+ >>> dictincr(d, 1)
+ 3
+ >>> d[1]
+ 3
+ >>> dictincr(d, 5)
+ 1
+ >>> d[5]
+ 1
+ """
+ dictionary.setdefault(element, 0)
+ dictionary[element] += 1
+ return dictionary[element]
+
+def dictadd(*dicts):
+ """
+ Returns a dictionary consisting of the keys in the argument dictionaries.
+ If they share a key, the value from the last argument is used.
+
+ >>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
+ {1: 0, 2: 1, 3: 1}
+ """
+ result = {}
+ for dct in dicts:
+ result.update(dct)
+ return result
+
+def listget(lst, ind, default=None):
+ """
+ Returns `lst[ind]` if it exists, `default` otherwise.
+
+ >>> listget(['a'], 0)
+ 'a'
+ >>> listget(['a'], 1)
+ >>> listget(['a'], 1, 'b')
+ 'b'
+ """
+ if len(lst)-1 < ind:
+ return default
+ return lst[ind]
+
+def intget(integer, default=None):
+ """
+ Returns `integer` as an int, or `default` if it can't be converted.
+
+ >>> intget('3')
+ 3
+ >>> intget('3a')
+ >>> intget('3a', 0)
+ 0
+ """
+ try:
+ return int(integer)
+ except (TypeError, ValueError):
+ return default
+
+def datestr(then, now=None):
+ """
+ Converts a (UTC) datetime object to a nice string representation.
+
+ >>> from datetime import datetime, timedelta
+ >>> d = datetime(1970, 5, 1)
+ >>> datestr(d, now=d)
+ '0 microseconds ago'
+ >>> for t, v in {
+ ... timedelta(microseconds=1): '1 microsecond ago',
+ ... timedelta(microseconds=2): '2 microseconds ago',
+ ... -timedelta(microseconds=1): '1 microsecond from now',
+ ... -timedelta(microseconds=2): '2 microseconds from now',
+ ... timedelta(microseconds=2000): '2 milliseconds ago',
+ ... timedelta(seconds=2): '2 seconds ago',
+ ... timedelta(seconds=2*60): '2 minutes ago',
+ ... timedelta(seconds=2*60*60): '2 hours ago',
+ ... timedelta(days=2): '2 days ago',
+ ... }.iteritems():
+ ... assert datestr(d, now=d+t) == v
+ >>> datestr(datetime(1970, 1, 1), now=d)
+ 'January 1'
+ >>> datestr(datetime(1969, 1, 1), now=d)
+ 'January 1, 1969'
+ >>> datestr(datetime(1970, 6, 1), now=d)
+ 'June 1, 1970'
+ >>> datestr(None)
+ ''
+ """
+ def agohence(n, what, divisor=None):
+ if divisor: n = n // divisor
+
+ out = str(abs(n)) + ' ' + what # '2 day'
+ if abs(n) != 1: out += 's' # '2 days'
+ out += ' ' # '2 days '
+ if n < 0:
+ out += 'from now'
+ else:
+ out += 'ago'
+ return out # '2 days ago'
+
+ oneday = 24 * 60 * 60
+
+ if not then: return ""
+ if not now: now = datetime.datetime.utcnow()
+ if type(now).__name__ == "DateTime":
+ now = datetime.datetime.fromtimestamp(now)
+ if type(then).__name__ == "DateTime":
+ then = datetime.datetime.fromtimestamp(then)
+ elif type(then).__name__ == "date":
+ then = datetime.datetime(then.year, then.month, then.day)
+
+ delta = now - then
+ deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
+ deltadays = abs(deltaseconds) // oneday
+ if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
+
+ if deltadays:
+ if abs(deltadays) < 4:
+ return agohence(deltadays, 'day')
+
+ out = then.strftime('%B %e') # e.g. 'June 13'
+ if then.year != now.year or deltadays < 0:
+ out += ', %s' % then.year
+ return out
+
+ if int(deltaseconds):
+ if abs(deltaseconds) > (60 * 60):
+ return agohence(deltaseconds, 'hour', 60 * 60)
+ elif abs(deltaseconds) > 60:
+ return agohence(deltaseconds, 'minute', 60)
+ else:
+ return agohence(deltaseconds, 'second')
+
+ deltamicroseconds = delta.microseconds
+ if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
+ if abs(deltamicroseconds) > 1000:
+ return agohence(deltamicroseconds, 'millisecond', 1000)
+
+ return agohence(deltamicroseconds, 'microsecond')
+
+def numify(string):
+ """
+ Removes all non-digit characters from `string`.
+
+ >>> numify('800-555-1212')
+ '8005551212'
+ >>> numify('800.555.1212')
+ '8005551212'
+
+ """
+ return ''.join([c for c in str(string) if c.isdigit()])
+
+def denumify(string, pattern):
+ """
+ Formats `string` according to `pattern`, where the letter X gets replaced
+ by characters from `string`.
+
+ >>> denumify("8005551212", "(XXX) XXX-XXXX")
+ '(800) 555-1212'
+
+ """
+ out = []
+ for c in pattern:
+ if c == "X":
+ out.append(string[0])
+ string = string[1:]
+ else:
+ out.append(c)
+ return ''.join(out)
+
+def commify(n):
+ """
+ Add commas to an integer `n`.
+
+ >>> commify(1)
+ '1'
+ >>> commify(123)
+ '123'
+ >>> commify(1234)
+ '1,234'
+ >>> commify(1234567890)
+ '1,234,567,890'
+ >>> commify(123.0)
+ '123.0'
+ >>> commify(1234.5)
+ '1,234.5'
+ >>> commify(1234.56789)
+ '1,234.56789'
+ >>> commify('%.2f' % 1234.5)
+ '1,234.50'
+ >>> commify(None)
+ >>>
+
+ """
+ if n is None: return None
+ n = str(n)
+ if '.' in n:
+ dollars, cents = n.split('.')
+ else:
+ dollars, cents = n, None
+
+ r = []
+ for i, c in enumerate(str(dollars)[::-1]):
+ if i and (not (i % 3)):
+ r.insert(0, ',')
+ r.insert(0, c)
+ out = ''.join(r)
+ if cents:
+ out += '.' + cents
+ return out
+
+def dateify(datestring):
+ """
+ Formats a numified `datestring` as 'XXXX-XX-XX XX:XX:XX'.
+ """
+ return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
+
+
+def nthstr(n):
+ """
+ Formats an ordinal.
+ Doesn't handle negative numbers.
+
+ >>> nthstr(1)
+ '1st'
+ >>> nthstr(0)
+ '0th'
+ >>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
+ ['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
+ >>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
+ ['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
+ >>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
+ ['111th', '112th', '113th', '114th', '115th']
+
+ """
+
+ assert n >= 0
+ if n % 100 in [11, 12, 13]: return '%sth' % n
+ return {1: '%sst', 2: '%snd', 3: '%srd'}.get(n % 10, '%sth') % n
+
+def cond(predicate, consequence, alternative=None):
+ """
+ Function replacement for if-else to use in expressions.
+
+ >>> x = 2
+ >>> cond(x % 2 == 0, "even", "odd")
+ 'even'
+ >>> cond(x % 2 == 0, "even", "odd") + '_row'
+ 'even_row'
+ """
+ if predicate:
+ return consequence
+ else:
+ return alternative
+
+class CaptureStdout:
+ """
+ Captures everything `func` prints to stdout and returns it instead.
+
+ >>> def idiot():
+ ... print "foo"
+ >>> capturestdout(idiot)()
+ 'foo\\n'
+
+ **WARNING:** Not threadsafe!
+ """
+ def __init__(self, func):
+ self.func = func
+ def __call__(self, *args, **keywords):
+ from cStringIO import StringIO
+ # Not threadsafe!
+ out = StringIO()
+ oldstdout = sys.stdout
+ sys.stdout = out
+ try:
+ self.func(*args, **keywords)
+ finally:
+ sys.stdout = oldstdout
+ return out.getvalue()
+
+capturestdout = CaptureStdout
+
+class Profile:
+ """
+ Profiles `func` and returns a tuple containing its output
+ and a string with human-readable profiling information.
+
+ >>> import time
+ >>> out, inf = profile(time.sleep)(.001)
+ >>> out
+ >>> inf[:10].strip()
+ 'took 0.0'
+ """
+ def __init__(self, func):
+ self.func = func
+ def __call__(self, *args): ##, **kw): kw unused
+ import hotshot, hotshot.stats, tempfile ##, time already imported
+ temp = tempfile.NamedTemporaryFile()
+ prof = hotshot.Profile(temp.name)
+
+ stime = time.time()
+ result = prof.runcall(self.func, *args)
+ stime = time.time() - stime
+ prof.close()
+
+ import cStringIO
+ out = cStringIO.StringIO()
+ stats = hotshot.stats.load(temp.name)
+ stats.stream = out
+ stats.strip_dirs()
+ stats.sort_stats('time', 'calls')
+ stats.print_stats(40)
+ stats.print_callers()
+
+ x = '\n\ntook '+ str(stime) + ' seconds\n'
+ x += out.getvalue()
+
+ return result, x
+
+profile = Profile
+
+
+import traceback
+# hack for compatibility with Python 2.3:
+if not hasattr(traceback, 'format_exc'):
+ from cStringIO import StringIO
+ def format_exc(limit=None):
+ strbuf = StringIO()
+ traceback.print_exc(limit, strbuf)
+ return strbuf.getvalue()
+ traceback.format_exc = format_exc
+
+def tryall(context, prefix=None):
+ """
+ Tries a series of functions and prints their results.
+ `context` is a dictionary mapping names to values;
+ the value will only be tried if it's callable.
+
+ >>> tryall(dict(j=lambda: True))
+ j: True
+ ----------------------------------------
+ results:
+ True: 1
+
+ For example, you might have a file `test/stuff.py`
+ with a series of functions testing various things in it.
+ At the bottom, have a line:
+
+ if __name__ == "__main__": tryall(globals())
+
+ Then you can run `python test/stuff.py` and get the results of
+ all the tests.
+ """
+ context = context.copy() # vars() would update
+ results = {}
+ for (key, value) in context.iteritems():
+ if not hasattr(value, '__call__'):
+ continue
+ if prefix and not key.startswith(prefix):
+ continue
+ print key + ':',
+ try:
+ r = value()
+ dictincr(results, r)
+ print r
+ except:
+ print 'ERROR'
+ dictincr(results, 'ERROR')
+ print ' ' + '\n '.join(traceback.format_exc().split('\n'))
+
+ print '-'*40
+ print 'results:'
+ for (key, value) in results.iteritems():
+ print ' '*2, str(key)+':', value
+
+class ThreadedDict:
+ """
+ Thread local storage.
+
+ >>> d = ThreadedDict()
+ >>> d.x = 1
+ >>> d.x
+ 1
+ >>> import threading
+ >>> def f(): d.x = 2
+ ...
+ >>> t = threading.Thread(target=f)
+ >>> t.start()
+ >>> t.join()
+ >>> d.x
+ 1
+ """
+ def __getattr__(self, key):
+ return getattr(self._getd(), key)
+
+ def __setattr__(self, key, value):
+ return setattr(self._getd(), key, value)
+
+ def __delattr__(self, key):
+ return delattr(self._getd(), key)
+
+ def __hash__(self):
+ return id(self)
+
+ def _getd(self):
+ t = threading.currentThread()
+ if not hasattr(t, '_d'):
+ # using __dict__ of thread as thread local storage
+ t._d = {}
+
+ # there could be multiple instances of ThreadedDict.
+ # use self as key
+ if self not in t._d:
+ t._d[self] = storage()
+ return t._d[self]
+
+threadeddict = ThreadedDict
+
+def autoassign(self, locals):
+ """
+ Automatically assigns local variables to `self`.
+
+ >>> self = storage()
+ >>> autoassign(self, dict(a=1, b=2))
+ >>> self
+ <Storage {'a': 1, 'b': 2}>
+
+ Generally used in `__init__` methods, as in:
+
+ def __init__(self, foo, bar, baz=1): autoassign(self, locals())
+ """
+ for (key, value) in locals.iteritems():
+ if key == 'self':
+ continue
+ setattr(self, key, value)
+
+def to36(q):
+ """
+ Converts an integer to base 36 (a useful scheme for human-sayable IDs).
+
+ >>> to36(35)
+ 'z'
+ >>> to36(119292)
+ '2k1o'
+ >>> int(to36(939387374), 36)
+ 939387374
+ >>> to36(0)
+ '0'
+ >>> to36(-393)
+ Traceback (most recent call last):
+ ...
+ ValueError: must supply a positive integer
+
+ """
+ if q < 0: raise ValueError, "must supply a positive integer"
+ letters = "0123456789abcdefghijklmnopqrstuvwxyz"
+ converted = []
+ while q != 0:
+ q, r = divmod(q, 36)
+ converted.insert(0, letters[r])
+ return "".join(converted) or '0'
+
+
+r_url = re_compile('(?<!\()(http://(\S+))')
+def safemarkdown(text):
+ """
+ Converts text to HTML following the rules of Markdown, but blocking any
+ outside HTML input, so that only the things supported by Markdown
+ can be used. Also converts raw URLs to links.
+
+ (requires [markdown.py](http://webpy.org/markdown.py))
+ """
+ from markdown import markdown
+ if text:
+ text = text.replace('<', '&lt;')
+ # TODO: automatically get page title?
+ text = r_url.sub(r'<\1>', text)
+ text = markdown(text)
+ return text
+
+def sendmail(from_address, to_address, subject, message, headers=None, **kw):
+ """
+ Sends the email message `message` with mail and envelope headers
+ from `from_address` to `to_address` with `subject`.
+ Additional email headers can be specified with the dictionary
+ `headers`.
+
+ If `web.config.smtp_server` is set, it will send the message
+ to that SMTP server. Otherwise it will look for
+ `/usr/sbin/sendmail`, the typical location for the sendmail-style
+ binary. To use sendmail from a different path, set `web.config.sendmail_path`.
+ """
+ try:
+ import webapi
+ except ImportError:
+ webapi = Storage(config=Storage())
+
+ if headers is None: headers = {}
+
+ cc = kw.get('cc', [])
+ bcc = kw.get('bcc', [])
+
+ def listify(x):
+ if not isinstance(x, list):
+ return [safestr(x)]
+ else:
+ return [safestr(a) for a in x]
+
+ from_address = safestr(from_address)
+
+ to_address = listify(to_address)
+ cc = listify(cc)
+ bcc = listify(bcc)
+
+ recipients = to_address + cc + bcc
+
+ headers = dictadd({
+ 'MIME-Version': '1.0',
+ 'Content-Type': 'text/plain; charset=UTF-8',
+ 'Content-Disposition': 'inline',
+ 'From': from_address,
+ 'To': ", ".join(to_address),
+ 'Subject': subject
+ }, headers)
+
+ if cc:
+ headers['Cc'] = ", ".join(cc)
+
+ import email.Utils
+ from_address = email.Utils.parseaddr(from_address)[1]
+ recipients = [email.Utils.parseaddr(r)[1] for r in recipients]
+ message = ('\n'.join([safestr('%s: %s' % x) for x in headers.iteritems()])
+ + "\n\n" + safestr(message))
+
+ if webapi.config.get('smtp_server'):
+ server = webapi.config.get('smtp_server')
+ port = webapi.config.get('smtp_port', 0)
+ username = webapi.config.get('smtp_username')
+ password = webapi.config.get('smtp_password')
+ debug_level = webapi.config.get('smtp_debuglevel', None)
+ starttls = webapi.config.get('smtp_starttls', False)
+
+ import smtplib
+ smtpserver = smtplib.SMTP(server, port)
+
+ if debug_level:
+ smtpserver.set_debuglevel(debug_level)
+
+ if starttls:
+ smtpserver.ehlo()
+ smtpserver.starttls()
+ smtpserver.ehlo()
+
+ if username and password:
+ smtpserver.login(username, password)
+
+ smtpserver.sendmail(from_address, recipients, message)
+ smtpserver.quit()
+ else:
+ sendmail = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')
+
+ assert not from_address.startswith('-'), 'security'
+ for r in recipients:
+ assert not r.startswith('-'), 'security'
+
+
+ if subprocess:
+ p = subprocess.Popen([sendmail, '-f', from_address] + recipients, stdin=subprocess.PIPE)
+ p.stdin.write(message)
+ p.stdin.close()
+ p.wait()
+ else:
+ import os
+ i, o = os.popen2([sendmail, '-f', from_address] + recipients)
+ i.write(message)
+ i.close()
+ o.close()
+ del i, o
+
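+# Editor's note: the block below is an illustrative sketch added in review,
+# not part of the bundled web.py source. It shows one way an application
+# might configure sendmail() to use an SMTP server, assuming the package is
+# importable as `web`; the host, port, credentials and addresses are
+# placeholders, and the function is never called by the library itself.
+def _example_sendmail_usage():
+    import web
+    web.config.smtp_server = 'smtp.example.com'
+    web.config.smtp_port = 587
+    web.config.smtp_starttls = True
+    web.config.smtp_username = 'user@example.com'
+    web.config.smtp_password = 'secret'
+    web.sendmail('from@example.com', ['to@example.com'],
+                 'Hello from web.py', 'Message body.',
+                 cc=['cc@example.com'])
+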
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/webapi.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,368 @@
+"""
+Web API (wrapper around WSGI)
+(from web.py)
+"""
+
+__all__ = [
+ "config",
+ "header", "debug",
+ "input", "data",
+ "setcookie", "cookies",
+ "ctx",
+ "HTTPError",
+
+ # 200, 201, 202
+ "OK", "Created", "Accepted",
+ "ok", "created", "accepted",
+
+ # 301, 302, 303, 304, 407
+ "Redirect", "Found", "SeeOther", "NotModified", "TempRedirect",
+ "redirect", "found", "seeother", "notmodified", "tempredirect",
+
+ # 400, 401, 403, 404, 405, 406, 409, 410, 412
+ "BadRequest", "Unauthorized", "Forbidden", "NoMethod", "NotFound", "NotAcceptable", "Conflict", "Gone", "PreconditionFailed",
+ "badrequest", "unauthorized", "forbidden", "nomethod", "notfound", "notacceptable", "conflict", "gone", "preconditionfailed",
+
+ # 500
+ "InternalError",
+ "internalerror",
+]
+
+import sys, cgi, Cookie, pprint, urlparse, urllib
+from utils import storage, storify, threadeddict, dictadd, intget, utf8
+
+config = storage()
+config.__doc__ = """
+A configuration object for various aspects of web.py.
+
+`debug`
+ : when True, enables reloading, disables template caching and sets internalerror to debugerror.
+"""
+
+class HTTPError(Exception):
+ def __init__(self, status, headers={}, data=""):
+ ctx.status = status
+ for k, v in headers.items():
+ header(k, v)
+ self.data = data
+ Exception.__init__(self, status)
+
+def _status_code(status, data=None, classname=None, docstring=None):
+ if data is None:
+ data = status.split(" ", 1)[1]
+ classname = status.split(" ", 1)[1].replace(' ', '') # 304 Not Modified -> NotModified
+ docstring = docstring or '`%s` status' % status
+
+ def __init__(self, data=data, headers={}):
+ HTTPError.__init__(self, status, headers, data)
+
+ # trick to create class dynamically with dynamic docstring.
+ return type(classname, (HTTPError, object), {
+ '__doc__': docstring,
+ '__init__': __init__
+ })
+
+ok = OK = _status_code("200 OK", data="")
+created = Created = _status_code("201 Created")
+accepted = Accepted = _status_code("202 Accepted")
+
+class Redirect(HTTPError):
+ """A `301 Moved Permanently` redirect."""
+ def __init__(self, url, status='301 Moved Permanently', absolute=False):
+ """
+ Returns a `status` redirect to the new URL.
+ `url` is joined with the base URL so that things like
+ `redirect("about")` will work properly.
+ """
+ newloc = urlparse.urljoin(ctx.path, url)
+
+ if newloc.startswith('/'):
+ if absolute:
+ home = ctx.realhome
+ else:
+ home = ctx.home
+ newloc = home + newloc
+
+ headers = {
+ 'Content-Type': 'text/html',
+ 'Location': newloc
+ }
+ HTTPError.__init__(self, status, headers, "")
+
+redirect = Redirect
+
+class Found(Redirect):
+ """A `302 Found` redirect."""
+ def __init__(self, url, absolute=False):
+ Redirect.__init__(self, url, '302 Found', absolute=absolute)
+
+found = Found
+
+class SeeOther(Redirect):
+ """A `303 See Other` redirect."""
+ def __init__(self, url, absolute=False):
+ Redirect.__init__(self, url, '303 See Other', absolute=absolute)
+
+seeother = SeeOther
+
+class NotModified(HTTPError):
+ """A `304 Not Modified` status."""
+ def __init__(self):
+ HTTPError.__init__(self, "304 Not Modified")
+
+notmodified = NotModified
+
+class TempRedirect(Redirect):
+ """A `307 Temporary Redirect` redirect."""
+ def __init__(self, url, absolute=False):
+ Redirect.__init__(self, url, '307 Temporary Redirect', absolute=absolute)
+
+tempredirect = TempRedirect
+
+class BadRequest(HTTPError):
+ """`400 Bad Request` error."""
+ message = "bad request"
+ def __init__(self):
+ status = "400 Bad Request"
+ headers = {'Content-Type': 'text/html'}
+ HTTPError.__init__(self, status, headers, self.message)
+
+badrequest = BadRequest
+
+class _NotFound(HTTPError):
+ """`404 Not Found` error."""
+ message = "not found"
+ def __init__(self, message=None):
+ status = '404 Not Found'
+ headers = {'Content-Type': 'text/html'}
+ HTTPError.__init__(self, status, headers, message or self.message)
+
+def NotFound(message=None):
+ """Returns HTTPError with '404 Not Found' error from the active application.
+ """
+ if message:
+ return _NotFound(message)
+ elif ctx.get('app_stack'):
+ return ctx.app_stack[-1].notfound()
+ else:
+ return _NotFound()
+
+notfound = NotFound
+
+unauthorized = Unauthorized = _status_code("401 Unauthorized")
+forbidden = Forbidden = _status_code("403 Forbidden")
+notacceptable = NotAcceptable = _status_code("406 Not Acceptable")
+conflict = Conflict = _status_code("409 Conflict")
+preconditionfailed = PreconditionFailed = _status_code("412 Precondition Failed")
+
+class NoMethod(HTTPError):
+ """A `405 Method Not Allowed` error."""
+ def __init__(self, cls=None):
+ status = '405 Method Not Allowed'
+ headers = {}
+ headers['Content-Type'] = 'text/html'
+
+ methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE']
+ if cls:
+ methods = [method for method in methods if hasattr(cls, method)]
+
+ headers['Allow'] = ', '.join(methods)
+ data = None
+ HTTPError.__init__(self, status, headers, data)
+
+nomethod = NoMethod
+
+class Gone(HTTPError):
+ """`410 Gone` error."""
+ message = "gone"
+ def __init__(self):
+ status = '410 Gone'
+ headers = {'Content-Type': 'text/html'}
+ HTTPError.__init__(self, status, headers, self.message)
+
+gone = Gone
+
+class _InternalError(HTTPError):
+ """`500 Internal Server Error` error."""
+ message = "internal server error"
+
+ def __init__(self, message=None):
+ status = '500 Internal Server Error'
+ headers = {'Content-Type': 'text/html'}
+ HTTPError.__init__(self, status, headers, message or self.message)
+
+def InternalError(message=None):
+ """Returns HTTPError with '500 Internal Server Error' from the active application.
+ """
+ if message:
+ return _InternalError(message)
+ elif ctx.get('app_stack'):
+ return ctx.app_stack[-1].internalerror()
+ else:
+ return _InternalError()
+
+internalerror = InternalError
+
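+# Editor's note: the block below is an illustrative sketch added in review,
+# not part of the bundled web.py source. The status classes above are meant
+# to be raised from application handlers; the handler shape, URL and messages
+# are placeholders, and the function is never called by the library itself.
+def _example_raising_status_codes(logged_in, record):
+    if not logged_in:
+        raise seeother('/login')          # 303: send the browser elsewhere
+    if record is None:
+        raise notfound('no such record')  # 404 with a custom body
+    return 'ok'
+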
+def header(hdr, value, unique=False):
+ """
+ Adds the header `hdr: value` to the response.
+
+ If `unique` is True and a header with that name already exists,
+ it doesn't add a new one.
+ """
+ hdr, value = utf8(hdr), utf8(value)
+ # protection against HTTP response splitting attack
+ if '\n' in hdr or '\r' in hdr or '\n' in value or '\r' in value:
+ raise ValueError, 'invalid characters in header'
+
+ if unique is True:
+ for h, v in ctx.headers:
+ if h.lower() == hdr.lower(): return
+
+ ctx.headers.append((hdr, value))
+
+def input(*requireds, **defaults):
+ """
+ Returns a `storage` object with the GET and POST arguments.
+ See `storify` for how `requireds` and `defaults` work.
+ """
+ from cStringIO import StringIO
+ def dictify(fs):
+ # hack to make web.input work with enctype='text/plain'.
+ if fs.list is None:
+ fs.list = []
+
+ return dict([(k, fs[k]) for k in fs.keys()])
+
+ _method = defaults.pop('_method', 'both')
+
+ e = ctx.env.copy()
+ a = b = {}
+
+ if _method.lower() in ['both', 'post', 'put']:
+ if e['REQUEST_METHOD'] in ['POST', 'PUT']:
+ if e.get('CONTENT_TYPE', '').lower().startswith('multipart/'):
+ # since wsgi.input is directly passed to cgi.FieldStorage,
+ # it can not be called multiple times. Saving the FieldStorage
+ # object in ctx to allow calling web.input multiple times.
+ a = ctx.get('_fieldstorage')
+ if not a:
+ fp = e['wsgi.input']
+ a = cgi.FieldStorage(fp=fp, environ=e, keep_blank_values=1)
+ ctx._fieldstorage = a
+ else:
+ fp = StringIO(data())
+ a = cgi.FieldStorage(fp=fp, environ=e, keep_blank_values=1)
+ a = dictify(a)
+
+ if _method.lower() in ['both', 'get']:
+ e['REQUEST_METHOD'] = 'GET'
+ b = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1))
+
+ out = dictadd(b, a)
+ try:
+ defaults.setdefault('_unicode', True) # force unicode conversion by default.
+ return storify(out, *requireds, **defaults)
+ except KeyError:
+ raise badrequest()
+
+def data():
+ """Returns the data sent with the request."""
+ if 'data' not in ctx:
+ cl = intget(ctx.env.get('CONTENT_LENGTH'), 0)
+ ctx.data = ctx.env['wsgi.input'].read(cl)
+ return ctx.data
+
+def setcookie(name, value, expires="", domain=None, secure=False):
+ """Sets a cookie."""
+ if expires < 0:
+ expires = -1000000000
+ kargs = {'expires': expires, 'path':'/'}
+ if domain:
+ kargs['domain'] = domain
+ if secure:
+ kargs['secure'] = secure
+ # @@ should we limit cookies to a different path?
+ cookie = Cookie.SimpleCookie()
+ cookie[name] = urllib.quote(utf8(value))
+ for key, val in kargs.iteritems():
+ cookie[name][key] = val
+ header('Set-Cookie', cookie.items()[0][1].OutputString())
+
+def cookies(*requireds, **defaults):
+ """
+ Returns a `storage` object with all the cookies in it.
+ See `storify` for how `requireds` and `defaults` work.
+ """
+ cookie = Cookie.SimpleCookie()
+ cookie.load(ctx.env.get('HTTP_COOKIE', ''))
+ try:
+ d = storify(cookie, *requireds, **defaults)
+ for k, v in d.items():
+ d[k] = v and urllib.unquote(v)
+ return d
+ except KeyError:
+ badrequest()
+ raise StopIteration
+
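+# Editor's note: the block below is an illustrative sketch added in review,
+# not part of the bundled web.py source. It shows how a handler might combine
+# input(), cookies(), setcookie() and header(); the parameter and cookie
+# names are placeholders, and the function is never called by the library.
+def _example_input_and_cookies():
+    i = input('q', page='1')           # require ?q=..., default page to '1'
+    visits = intget(cookies(visits='0').visits, 0) + 1
+    setcookie('visits', str(visits))
+    header('Content-Type', 'text/plain')
+    return 'query=%s page=%s visits=%d' % (i.q, i.page, visits)
+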
+def debug(*args):
+ """
+ Prints a prettyprinted version of `args` to stderr.
+ """
+ try:
+ out = ctx.environ['wsgi.errors']
+ except:
+ out = sys.stderr
+ for arg in args:
+ print >> out, pprint.pformat(arg)
+ return ''
+
+def _debugwrite(x):
+ try:
+ out = ctx.environ['wsgi.errors']
+ except:
+ out = sys.stderr
+ out.write(x)
+debug.write = _debugwrite
+
+ctx = context = threadeddict()
+
+ctx.__doc__ = """
+A `storage` object containing various information about the request:
+
+`environ` (aka `env`)
+ : A dictionary containing the standard WSGI environment variables.
+
+`host`
+ : The domain (`Host` header) requested by the user.
+
+`home`
+ : The base path for the application.
+
+`ip`
+ : The IP address of the requester.
+
+`method`
+ : The HTTP method used.
+
+`path`
+ : The path requested.
+
+`query`
+ : If there are no query arguments, the empty string. Otherwise, a `?` followed
+ by the query string.
+
+`fullpath`
+ : The full path requested, including query arguments (`== path + query`).
+
+### Response Data
+
+`status` (default: "200 OK")
+ : The status code to be used in the response.
+
+`headers`
+ : A list of 2-tuples to be used in the response.
+
+`output`
+ : A string to be used as the response.
+"""
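+
+# Editor's note: the lines below are an illustrative sketch added in review,
+# not part of the bundled web.py source. Inside a request handler the context
+# documented above can be read directly; this helper is never called by the
+# library itself.
+def _example_ctx_summary():
+    return "%s %s%s from %s" % (ctx.method, ctx.path, ctx.query, ctx.ip)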
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/webopenid.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,115 @@
+"""openid.py: an openid library for web.py
+
+Notes:
+
+ - This will create a file called .openid_secret_key in the
+ current directory with your secret key in it. If someone
+ has access to this file they can log in as any user. And
+ if the app can't find this file for any reason (e.g. you
+ moved the app somewhere else) then each currently logged
+ in user will get logged out.
+
+ - State must be maintained through the entire auth process
+ -- this means that if you have multiple web.py processes
+ serving one set of URLs, or if you restart your app often,
+ then logins will fail. You have to replace `sessions` and
+ `store` with persistent storage for things to work.
+
+ - We set cookies starting with "openid_".
+
+"""
+
+import os
+import random
+import hmac
+import __init__ as web
+import openid.consumer.consumer
+import openid.store.memstore
+
+sessions = {}
+store = openid.store.memstore.MemoryStore()
+
+def _secret():
+ try:
+ secret = file('.openid_secret_key').read()
+ except IOError:
+ # file doesn't exist
+ secret = os.urandom(20)
+ file('.openid_secret_key', 'w').write(secret)
+ return secret
+
+def _hmac(identity_url):
+ return hmac.new(_secret(), identity_url).hexdigest()
+
+def _random_session():
+ n = random.random()
+ while n in sessions:
+ n = random.random()
+ n = str(n)
+ return n
+
+def status():
+ oid_hash = web.cookies().get('openid_identity_hash', '').split(',', 1)
+ if len(oid_hash) > 1:
+ oid_hash, identity_url = oid_hash
+ if oid_hash == _hmac(identity_url):
+ return identity_url
+ return None
+
+def form(openid_loc):
+ oid = status()
+ if oid:
+ return '''
+ <form method="post" action="%s">
+ <img src="http://openid.net/login-bg.gif" alt="OpenID" />
+ <strong>%s</strong>
+ <input type="hidden" name="action" value="logout" />
+ <input type="hidden" name="return_to" value="%s" />
+ <button type="submit">log out</button>
+ </form>''' % (openid_loc, oid, web.ctx.fullpath)
+ else:
+ return '''
+ <form method="post" action="%s">
+ <input type="text" name="openid" value=""
+ style="background: url(http://openid.net/login-bg.gif) no-repeat; padding-left: 18px; background-position: 0 50%%;" />
+ <input type="hidden" name="return_to" value="%s" />
+ <button type="submit">log in</button>
+ </form>''' % (openid_loc, web.ctx.fullpath)
+
+def logout():
+ web.setcookie('openid_identity_hash', '', expires=-1)
+
+class host:
+ def POST(self):
+ # unlike the usual scheme of things, the POST is actually called
+ # first here
+ i = web.input(return_to='/')
+ if i.get('action') == 'logout':
+ logout()
+ return web.redirect(i.return_to)
+
+ i = web.input('openid', return_to='/')
+
+ n = _random_session()
+ sessions[n] = {'webpy_return_to': i.return_to}
+
+ c = openid.consumer.consumer.Consumer(sessions[n], store)
+ a = c.begin(i.openid)
+ f = a.redirectURL(web.ctx.home, web.ctx.home + web.ctx.fullpath)
+
+ web.setcookie('openid_session_id', n)
+ return web.redirect(f)
+
+ def GET(self):
+ n = web.cookies('openid_session_id').openid_session_id
+ web.setcookie('openid_session_id', '', expires=-1)
+ return_to = sessions[n]['webpy_return_to']
+
+ c = openid.consumer.consumer.Consumer(sessions[n], store)
+ a = c.complete(web.input(), web.ctx.home + web.ctx.fullpath)
+
+ if a.status.lower() == 'success':
+ web.setcookie('openid_identity_hash', _hmac(a.identity_url) + ',' + a.identity_url)
+
+ del sessions[n]
+ return web.redirect(return_to)
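+
+# Editor's note: the block below is an illustrative sketch added in review,
+# not part of the bundled web.py source. A page handler like this could sit
+# alongside `host` in an application's URL map (with `host` mounted at, say,
+# '/openid'); the class name and markup are placeholders.
+class _example_index:
+    def GET(self):
+        user = status()          # identity URL of the logged-in user, or None
+        page = "<p>Hello, %s</p>" % (user or "anonymous")
+        page += form('/openid')  # renders a log in / log out form
+        return page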
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/wsgi.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,66 @@
+"""
+WSGI Utilities
+(from web.py)
+"""
+
+import os, sys
+
+import http
+import webapi as web
+from utils import listget
+from net import validaddr, validip
+import httpserver
+
+def runfcgi(func, addr=('localhost', 8000)):
+ """Runs a WSGI function as a FastCGI server."""
+ import flup.server.fcgi as flups
+ return flups.WSGIServer(func, multiplexed=True, bindAddress=addr).run()
+
+def runscgi(func, addr=('localhost', 4000)):
+ """Runs a WSGI function as an SCGI server."""
+ import flup.server.scgi as flups
+ return flups.WSGIServer(func, bindAddress=addr).run()
+
+def runwsgi(func):
+ """
+ Runs a WSGI-compatible `func` using FCGI, SCGI, or a simple web server,
+ as appropriate based on context and `sys.argv`.
+ """
+
+ if os.environ.has_key('SERVER_SOFTWARE'): # cgi
+ os.environ['FCGI_FORCE_CGI'] = 'Y'
+
+ if (os.environ.has_key('PHP_FCGI_CHILDREN') #lighttpd fastcgi
+ or os.environ.has_key('SERVER_SOFTWARE')):
+ return runfcgi(func, None)
+
+ if 'fcgi' in sys.argv or 'fastcgi' in sys.argv:
+ args = sys.argv[1:]
+ if 'fastcgi' in args: args.remove('fastcgi')
+ elif 'fcgi' in args: args.remove('fcgi')
+ if args:
+ return runfcgi(func, validaddr(args[0]))
+ else:
+ return runfcgi(func, None)
+
+ if 'scgi' in sys.argv:
+ args = sys.argv[1:]
+ args.remove('scgi')
+ if args:
+ return runscgi(func, validaddr(args[0]))
+ else:
+ return runscgi(func)
+
+ return httpserver.runsimple(func, validip(listget(sys.argv, 1, '')))
+
+def _is_dev_mode():
+ # quick hack to check if the program is running in dev mode.
+ if os.environ.has_key('SERVER_SOFTWARE') \
+ or os.environ.has_key('PHP_FCGI_CHILDREN') \
+ or 'fcgi' in sys.argv or 'fastcgi' in sys.argv \
+ or 'mod_wsgi' in sys.argv:
+ return False
+ return True
+
+# When running the builtin server, enable debug mode if not already set.
+web.config.setdefault('debug', _is_dev_mode())
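+
+# Editor's note: the block below is an illustrative sketch added in review,
+# not part of the bundled web.py source. runwsgi() picks the server from the
+# environment and sys.argv, so a script built around it can be started in
+# several ways (the script name, addresses and ports are examples only):
+#
+#   python app.py                         # builtin HTTP server (default port)
+#   python app.py 127.0.0.1:8000          # builtin HTTP server on that address
+#   python app.py fastcgi 127.0.0.1:9000  # FastCGI via flup
+#   python app.py scgi 127.0.0.1:4000     # SCGI via flup
+def _example_serve():
+    def hello(environ, start_response):
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return ['hello from runwsgi\n']
+    return runwsgi(hello)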
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/wsgiserver/LICENSE.txt Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,25 @@
+Copyright (c) 2004-2007, CherryPy Team (team@cherrypy.org)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the CherryPy Team nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/webpy/web/wsgiserver/__init__.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,1794 @@
+"""A high-speed, production ready, thread pooled, generic WSGI server.
+
+Simplest example on how to use this module directly
+(without using CherryPy's application machinery):
+
+ from cherrypy import wsgiserver
+
+ def my_crazy_app(environ, start_response):
+ status = '200 OK'
+ response_headers = [('Content-type','text/plain')]
+ start_response(status, response_headers)
+ return ['Hello world!\n']
+
+ server = wsgiserver.CherryPyWSGIServer(
+ ('0.0.0.0', 8070), my_crazy_app,
+ server_name='www.cherrypy.example')
+
+The CherryPy WSGI server can serve as many WSGI applications
+as you want in one instance by using a WSGIPathInfoDispatcher:
+
+ d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
+ server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
+
+Want SSL support? Just set these attributes:
+
+ server.ssl_certificate = <filename>
+ server.ssl_private_key = <filename>
+
+ if __name__ == '__main__':
+ try:
+ server.start()
+ except KeyboardInterrupt:
+ server.stop()
+
+This won't call the CherryPy engine (application side) at all, only the
+WSGI server, which is independent of the rest of CherryPy. Don't
+let the name "CherryPyWSGIServer" throw you; the name merely reflects
+its origin, not its coupling.
+
+For those of you wanting to understand internals of this module, here's the
+basic call flow. The server's listening thread runs a very tight loop,
+sticking incoming connections onto a Queue:
+
+ server = CherryPyWSGIServer(...)
+ server.start()
+ while True:
+ tick()
+ # This blocks until a request comes in:
+ child = socket.accept()
+ conn = HTTPConnection(child, ...)
+ server.requests.put(conn)
+
+Worker threads are kept in a pool and poll the Queue, popping off and then
+handling each connection in turn. Each connection can consist of an arbitrary
+number of requests and their responses, so we run a nested loop:
+
+ while True:
+ conn = server.requests.get()
+ conn.communicate()
+ -> while True:
+ req = HTTPRequest(...)
+ req.parse_request()
+ -> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
+ req.rfile.readline()
+ req.read_headers()
+ req.respond()
+ -> response = wsgi_app(...)
+ try:
+ for chunk in response:
+ if chunk:
+ req.write(chunk)
+ finally:
+ if hasattr(response, "close"):
+ response.close()
+ if req.close_connection:
+ return
+"""
+
+
+import base64
+import os
+import Queue
+import re
+quoted_slash = re.compile("(?i)%2F")
+import rfc822
+import socket
+try:
+ import cStringIO as StringIO
+except ImportError:
+ import StringIO
+
+_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
+
+import sys
+import threading
+import time
+import traceback
+from urllib import unquote
+from urlparse import urlparse
+import warnings
+
+try:
+ from OpenSSL import SSL
+ from OpenSSL import crypto
+except ImportError:
+ SSL = None
+
+import errno
+
+def plat_specific_errors(*errnames):
+ """Return error numbers for all errors in errnames on this platform.
+
+ The 'errno' module contains different global constants depending on
+ the specific platform (OS). This function will return the list of
+ numeric values for a given list of potential names.
+ """
+ errno_names = dir(errno)
+ nums = [getattr(errno, k) for k in errnames if k in errno_names]
+ # de-dupe the list
+ return dict.fromkeys(nums).keys()
+
+socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
+
+socket_errors_to_ignore = plat_specific_errors(
+ "EPIPE",
+ "EBADF", "WSAEBADF",
+ "ENOTSOCK", "WSAENOTSOCK",
+ "ETIMEDOUT", "WSAETIMEDOUT",
+ "ECONNREFUSED", "WSAECONNREFUSED",
+ "ECONNRESET", "WSAECONNRESET",
+ "ECONNABORTED", "WSAECONNABORTED",
+ "ENETRESET", "WSAENETRESET",
+ "EHOSTDOWN", "EHOSTUNREACH",
+ )
+socket_errors_to_ignore.append("timed out")
+
+socket_errors_nonblocking = plat_specific_errors(
+ 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
+
+comma_separated_headers = ['ACCEPT', 'ACCEPT-CHARSET', 'ACCEPT-ENCODING',
+ 'ACCEPT-LANGUAGE', 'ACCEPT-RANGES', 'ALLOW', 'CACHE-CONTROL',
+ 'CONNECTION', 'CONTENT-ENCODING', 'CONTENT-LANGUAGE', 'EXPECT',
+ 'IF-MATCH', 'IF-NONE-MATCH', 'PRAGMA', 'PROXY-AUTHENTICATE', 'TE',
+ 'TRAILER', 'TRANSFER-ENCODING', 'UPGRADE', 'VARY', 'VIA', 'WARNING',
+ 'WWW-AUTHENTICATE']
+
+
+class WSGIPathInfoDispatcher(object):
+ """A WSGI dispatcher for dispatch based on the PATH_INFO.
+
+ apps: a dict or list of (path_prefix, app) pairs.
+ """
+
+ def __init__(self, apps):
+ try:
+ apps = apps.items()
+ except AttributeError:
+ pass
+
+ # Sort the apps by len(path), descending
+ apps.sort()
+ apps.reverse()
+
+ # The path_prefix strings must start, but not end, with a slash.
+ # Use "" instead of "/".
+ self.apps = [(p.rstrip("/"), a) for p, a in apps]
+
+ def __call__(self, environ, start_response):
+ path = environ["PATH_INFO"] or "/"
+ for p, app in self.apps:
+ # The apps list should be sorted by length, descending.
+ if path.startswith(p + "/") or path == p:
+ environ = environ.copy()
+ environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
+ environ["PATH_INFO"] = path[len(p):]
+ return app(environ, start_response)
+
+ start_response('404 Not Found', [('Content-Type', 'text/plain'),
+ ('Content-Length', '0')])
+ return ['']
+
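+# Editor's note: the block below is an illustrative sketch added in review,
+# not part of the bundled CherryPy source. It shows how the dispatcher
+# rewrites SCRIPT_NAME and PATH_INFO; the app and prefix names are
+# placeholders, and the function is never called by the server itself.
+def _example_dispatch():
+    def echo(environ, start_response):
+        start_response('200 OK', [('Content-type', 'text/plain')])
+        return ['%(SCRIPT_NAME)s %(PATH_INFO)s' % environ]
+    # A request for /blog/2009/10 is routed to the '/blog' app with
+    # SCRIPT_NAME ending in '/blog' and PATH_INFO equal to '/2009/10'.
+    return WSGIPathInfoDispatcher({'/': echo, '/blog': echo})
+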
+
+class MaxSizeExceeded(Exception):
+ pass
+
+class SizeCheckWrapper(object):
+ """Wraps a file-like object, raising MaxSizeExceeded if too large."""
+
+ def __init__(self, rfile, maxlen):
+ self.rfile = rfile
+ self.maxlen = maxlen
+ self.bytes_read = 0
+
+ def _check_length(self):
+ if self.maxlen and self.bytes_read > self.maxlen:
+ raise MaxSizeExceeded()
+
+ def read(self, size=None):
+ data = self.rfile.read(size)
+ self.bytes_read += len(data)
+ self._check_length()
+ return data
+
+ def readline(self, size=None):
+ if size is not None:
+ data = self.rfile.readline(size)
+ self.bytes_read += len(data)
+ self._check_length()
+ return data
+
+ # User didn't specify a size ...
+ # We read the line in chunks to make sure it's not a 100MB line!
+ res = []
+ while True:
+ data = self.rfile.readline(256)
+ self.bytes_read += len(data)
+ self._check_length()
+ res.append(data)
+ # See http://www.cherrypy.org/ticket/421
+ if len(data) < 256 or data[-1:] == "\n":
+ return ''.join(res)
+
+ def readlines(self, sizehint=0):
+ # Shamelessly stolen from StringIO
+ total = 0
+ lines = []
+ line = self.readline()
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline()
+ return lines
+
+ def close(self):
+ self.rfile.close()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ data = self.rfile.next()
+ self.bytes_read += len(data)
+ self._check_length()
+ return data
+
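+# Editor's note: the block below is an illustrative sketch added in review,
+# not part of the bundled CherryPy source. It shows the wrapper's contract:
+# reads pass through until the running byte count exceeds maxlen, at which
+# point MaxSizeExceeded is raised; the function is never called by the server.
+def _example_sizecheck():
+    wrapped = SizeCheckWrapper(StringIO.StringIO("x" * 1024), 100)
+    try:
+        wrapped.read(1024)
+    except MaxSizeExceeded:
+        return "request entity too large"
+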
+
+class HTTPRequest(object):
+ """An HTTP Request (and response).
+
+ A single HTTP connection may consist of multiple request/response pairs.
+
+ send: the 'send' method from the connection's socket object.
+ wsgi_app: the WSGI application to call.
+ environ: a partial WSGI environ (server and connection entries).
+ The caller MUST set the following entries:
+ * All wsgi.* entries, including .input
+ * SERVER_NAME and SERVER_PORT
+ * Any SSL_* entries
+ * Any custom entries like REMOTE_ADDR and REMOTE_PORT
+ * SERVER_SOFTWARE: the value to write in the "Server" response header.
+ * ACTUAL_SERVER_PROTOCOL: the value to write in the Status-Line of
+ the response. From RFC 2145: "An HTTP server SHOULD send a
+ response version equal to the highest version for which the
+ server is at least conditionally compliant, and whose major
+ version is less than or equal to the one received in the
+ request. An HTTP server MUST NOT send a version for which
+ it is not at least conditionally compliant."
+
+ outheaders: a list of header tuples to write in the response.
+ ready: when True, the request has been parsed and is ready to begin
+ generating the response. When False, signals the calling Connection
+ that the response should not be generated and the connection should
+ close.
+ close_connection: signals the calling Connection that the request
+ should close. This does not imply an error! The client and/or
+ server may each request that the connection be closed.
+ chunked_write: if True, output will be encoded with the "chunked"
+ transfer-coding. This value is set automatically inside
+ send_headers.
+ """
+
+ max_request_header_size = 0
+ max_request_body_size = 0
+
+ def __init__(self, wfile, environ, wsgi_app):
+ self.rfile = environ['wsgi.input']
+ self.wfile = wfile
+ self.environ = environ.copy()
+ self.wsgi_app = wsgi_app
+
+ self.ready = False
+ self.started_response = False
+ self.status = ""
+ self.outheaders = []
+ self.sent_headers = False
+ self.close_connection = False
+ self.chunked_write = False
+
+ def parse_request(self):
+ """Parse the next HTTP request start-line and message-headers."""
+ self.rfile.maxlen = self.max_request_header_size
+ self.rfile.bytes_read = 0
+
+ try:
+ self._parse_request()
+ except MaxSizeExceeded:
+ self.simple_response("413 Request Entity Too Large")
+ return
+
+ def _parse_request(self):
+ # HTTP/1.1 connections are persistent by default. If a client
+ # requests a page, then idles (leaves the connection open),
+ # then rfile.readline() will raise socket.error("timed out").
+ # Note that it does this based on the value given to settimeout(),
+ # and doesn't need the client to request or acknowledge the close
+ # (although your TCP stack might suffer for it: cf Apache's history
+ # with FIN_WAIT_2).
+ request_line = self.rfile.readline()
+ if not request_line:
+ # Force self.ready = False so the connection will close.
+ self.ready = False
+ return
+
+ if request_line == "\r\n":
+ # RFC 2616 sec 4.1: "...if the server is reading the protocol
+ # stream at the beginning of a message and receives a CRLF
+ # first, it should ignore the CRLF."
+ # But only ignore one leading line! Otherwise we enable a DoS.
+ request_line = self.rfile.readline()
+ if not request_line:
+ self.ready = False
+ return
+
+ environ = self.environ
+
+ try:
+ method, path, req_protocol = request_line.strip().split(" ", 2)
+ except ValueError:
+ self.simple_response(400, "Malformed Request-Line")
+ return
+
+ environ["REQUEST_METHOD"] = method
+
+ # path may be an abs_path (including "http://host.domain.tld");
+ scheme, location, path, params, qs, frag = urlparse(path)
+
+ if frag:
+ self.simple_response("400 Bad Request",
+ "Illegal #fragment in Request-URI.")
+ return
+
+ if scheme:
+ environ["wsgi.url_scheme"] = scheme
+ if params:
+ path = path + ";" + params
+
+ environ["SCRIPT_NAME"] = ""
+
+ # Unquote the path+params (e.g. "/this%20path" -> "this path").
+ # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
+ #
+ # But note that "...a URI must be separated into its components
+ # before the escaped characters within those components can be
+ # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
+ atoms = [unquote(x) for x in quoted_slash.split(path)]
+ path = "%2F".join(atoms)
+ environ["PATH_INFO"] = path
+
+ # Note that, like wsgiref and most other WSGI servers,
+ # we unquote the path but not the query string.
+ environ["QUERY_STRING"] = qs
+
+ # Compare request and server HTTP protocol versions, in case our
+ # server does not support the requested protocol. Limit our output
+ # to min(req, server). We want the following output:
+ # request server actual written supported response
+ # protocol protocol response protocol feature set
+ # a 1.0 1.0 1.0 1.0
+ # b 1.0 1.1 1.1 1.0
+ # c 1.1 1.0 1.0 1.0
+ # d 1.1 1.1 1.1 1.1
+ # Notice that, in (b), the response will be "HTTP/1.1" even though
+ # the client only understands 1.0. RFC 2616 10.5.6 says we should
+ # only return 505 if the _major_ version is different.
+ rp = int(req_protocol[5]), int(req_protocol[7])
+ server_protocol = environ["ACTUAL_SERVER_PROTOCOL"]
+ sp = int(server_protocol[5]), int(server_protocol[7])
+ if sp[0] != rp[0]:
+ self.simple_response("505 HTTP Version Not Supported")
+ return
+ # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
+ environ["SERVER_PROTOCOL"] = req_protocol
+ self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
+
+ # If the Request-URI was an absoluteURI, use its location atom.
+ if location:
+ environ["SERVER_NAME"] = location
+
+ # then all the http headers
+ try:
+ self.read_headers()
+ except ValueError, ex:
+ self.simple_response("400 Bad Request", repr(ex.args))
+ return
+
+ mrbs = self.max_request_body_size
+ if mrbs and int(environ.get("CONTENT_LENGTH", 0)) > mrbs:
+ self.simple_response("413 Request Entity Too Large")
+ return
+
+ # Persistent connection support
+ if self.response_protocol == "HTTP/1.1":
+ # Both server and client are HTTP/1.1
+ if environ.get("HTTP_CONNECTION", "") == "close":
+ self.close_connection = True
+ else:
+ # Either the server or client (or both) are HTTP/1.0
+ if environ.get("HTTP_CONNECTION", "") != "Keep-Alive":
+ self.close_connection = True
+
+ # Transfer-Encoding support
+ te = None
+ if self.response_protocol == "HTTP/1.1":
+ te = environ.get("HTTP_TRANSFER_ENCODING")
+ if te:
+ te = [x.strip().lower() for x in te.split(",") if x.strip()]
+
+ self.chunked_read = False
+
+ if te:
+ for enc in te:
+ if enc == "chunked":
+ self.chunked_read = True
+ else:
+ # Note that, even if we see "chunked", we must reject
+ # if there is an extension we don't recognize.
+ self.simple_response("501 Unimplemented")
+ self.close_connection = True
+ return
+
+ # From PEP 333:
+ # "Servers and gateways that implement HTTP 1.1 must provide
+ # transparent support for HTTP 1.1's "expect/continue" mechanism.
+ # This may be done in any of several ways:
+ # 1. Respond to requests containing an Expect: 100-continue request
+ # with an immediate "100 Continue" response, and proceed normally.
+ # 2. Proceed with the request normally, but provide the application
+ # with a wsgi.input stream that will send the "100 Continue"
+ # response if/when the application first attempts to read from
+ # the input stream. The read request must then remain blocked
+ # until the client responds.
+ # 3. Wait until the client decides that the server does not support
+ # expect/continue, and sends the request body on its own.
+ # (This is suboptimal, and is not recommended.)
+ #
+ # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
+ # but it seems like it would be a big slowdown for such a rare case.
+ if environ.get("HTTP_EXPECT", "") == "100-continue":
+ self.simple_response(100)
+
+ self.ready = True
+
+ def read_headers(self):
+ """Read header lines from the incoming stream."""
+ environ = self.environ
+
+ while True:
+ line = self.rfile.readline()
+ if not line:
+ # No more data--illegal end of headers
+ raise ValueError("Illegal end of headers.")
+
+ if line == '\r\n':
+ # Normal end of headers
+ break
+
+ if line[0] in ' \t':
+ # It's a continuation line.
+ v = line.strip()
+ else:
+ k, v = line.split(":", 1)
+ k, v = k.strip().upper(), v.strip()
+ envname = "HTTP_" + k.replace("-", "_")
+
+ if k in comma_separated_headers:
+ existing = environ.get(envname)
+ if existing:
+ v = ", ".join((existing, v))
+ environ[envname] = v
+
+ ct = environ.pop("HTTP_CONTENT_TYPE", None)
+ if ct is not None:
+ environ["CONTENT_TYPE"] = ct
+ cl = environ.pop("HTTP_CONTENT_LENGTH", None)
+ if cl is not None:
+ environ["CONTENT_LENGTH"] = cl
+
+ def decode_chunked(self):
+ """Decode the 'chunked' transfer coding."""
+ cl = 0
+ data = StringIO.StringIO()
+ while True:
+ line = self.rfile.readline().strip().split(";", 1)
+ chunk_size = int(line.pop(0), 16)
+ if chunk_size <= 0:
+ break
+## if line: chunk_extension = line[0]
+ cl += chunk_size
+ data.write(self.rfile.read(chunk_size))
+ crlf = self.rfile.read(2)
+ if crlf != "\r\n":
+ self.simple_response("400 Bad Request",
+ "Bad chunked transfer coding "
+ "(expected '\\r\\n', got %r)" % crlf)
+ return
+
+ # Grab any trailer headers
+ self.read_headers()
+
+ data.seek(0)
+ self.environ["wsgi.input"] = data
+ self.environ["CONTENT_LENGTH"] = str(cl) or ""
+ return True
+
+ def respond(self):
+ """Call the appropriate WSGI app and write its iterable output."""
+ # Set rfile.maxlen to ensure we don't read past Content-Length.
+ # This will also be used to read the entire request body if errors
+ # are raised before the app can read the body.
+ if self.chunked_read:
+ # If chunked, Content-Length will be 0.
+ self.rfile.maxlen = self.max_request_body_size
+ else:
+ cl = int(self.environ.get("CONTENT_LENGTH", 0))
+ if self.max_request_body_size:
+ self.rfile.maxlen = min(cl, self.max_request_body_size)
+ else:
+ self.rfile.maxlen = cl
+ self.rfile.bytes_read = 0
+
+ try:
+ self._respond()
+ except MaxSizeExceeded:
+ if not self.sent_headers:
+ self.simple_response("413 Request Entity Too Large")
+ return
+
+ def _respond(self):
+ if self.chunked_read:
+ if not self.decode_chunked():
+ self.close_connection = True
+ return
+
+ response = self.wsgi_app(self.environ, self.start_response)
+ try:
+ for chunk in response:
+ # "The start_response callable must not actually transmit
+ # the response headers. Instead, it must store them for the
+ # server or gateway to transmit only after the first
+ # iteration of the application return value that yields
+ # a NON-EMPTY string, or upon the application's first
+ # invocation of the write() callable." (PEP 333)
+ if chunk:
+ self.write(chunk)
+ finally:
+ if hasattr(response, "close"):
+ response.close()
+
+ if (self.ready and not self.sent_headers):
+ self.sent_headers = True
+ self.send_headers()
+ if self.chunked_write:
+ self.wfile.sendall("0\r\n\r\n")
+
+ def simple_response(self, status, msg=""):
+ """Write a simple response back to the client."""
+ status = str(status)
+ buf = ["%s %s\r\n" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),
+ "Content-Length: %s\r\n" % len(msg),
+ "Content-Type: text/plain\r\n"]
+
+ if status[:3] == "413" and self.response_protocol == 'HTTP/1.1':
+ # Request Entity Too Large
+ self.close_connection = True
+ buf.append("Connection: close\r\n")
+
+ buf.append("\r\n")
+ if msg:
+ buf.append(msg)
+
+ try:
+ self.wfile.sendall("".join(buf))
+ except socket.error, x:
+ if x.args[0] not in socket_errors_to_ignore:
+ raise
+
+ def start_response(self, status, headers, exc_info = None):
+ """WSGI callable to begin the HTTP response."""
+ # "The application may call start_response more than once,
+ # if and only if the exc_info argument is provided."
+ if self.started_response and not exc_info:
+ raise AssertionError("WSGI start_response called a second "
+ "time with no exc_info.")
+
+ # "if exc_info is provided, and the HTTP headers have already been
+ # sent, start_response must raise an error, and should raise the
+ # exc_info tuple."
+ if self.sent_headers:
+ try:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None
+
+ self.started_response = True
+ self.status = status
+ self.outheaders.extend(headers)
+ return self.write
+
+ def write(self, chunk):
+ """WSGI callable to write unbuffered data to the client.
+
+ This method is also used internally by start_response (to write
+ data from the iterable returned by the WSGI application).
+ """
+ if not self.started_response:
+ raise AssertionError("WSGI write called before start_response.")
+
+ if not self.sent_headers:
+ self.sent_headers = True
+ self.send_headers()
+
+ if self.chunked_write and chunk:
+ buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
+ self.wfile.sendall("".join(buf))
+ else:
+ self.wfile.sendall(chunk)
+
+ def send_headers(self):
+ """Assert, process, and send the HTTP response message-headers."""
+ hkeys = [key.lower() for key, value in self.outheaders]
+ status = int(self.status[:3])
+
+ if status == 413:
+ # Request Entity Too Large. Close conn to avoid garbage.
+ self.close_connection = True
+ elif "content-length" not in hkeys:
+ # "All 1xx (informational), 204 (no content),
+ # and 304 (not modified) responses MUST NOT
+ # include a message-body." So no point chunking.
+ if status < 200 or status in (204, 205, 304):
+ pass
+ else:
+ if (self.response_protocol == 'HTTP/1.1'
+ and self.environ["REQUEST_METHOD"] != 'HEAD'):
+ # Use the chunked transfer-coding
+ self.chunked_write = True
+ self.outheaders.append(("Transfer-Encoding", "chunked"))
+ else:
+ # Closing the conn is the only way to determine len.
+ self.close_connection = True
+
+ if "connection" not in hkeys:
+ if self.response_protocol == 'HTTP/1.1':
+ # Both server and client are HTTP/1.1 or better
+ if self.close_connection:
+ self.outheaders.append(("Connection", "close"))
+ else:
+ # Server and/or client are HTTP/1.0
+ if not self.close_connection:
+ self.outheaders.append(("Connection", "Keep-Alive"))
+
+ if (not self.close_connection) and (not self.chunked_read):
+ # Read any remaining request body data on the socket.
+ # "If an origin server receives a request that does not include an
+ # Expect request-header field with the "100-continue" expectation,
+ # the request includes a request body, and the server responds
+ # with a final status code before reading the entire request body
+ # from the transport connection, then the server SHOULD NOT close
+ # the transport connection until it has read the entire request,
+ # or until the client closes the connection. Otherwise, the client
+ # might not reliably receive the response message. However, this
+ # requirement is not be construed as preventing a server from
+ # defending itself against denial-of-service attacks, or from
+ # badly broken client implementations."
+ size = self.rfile.maxlen - self.rfile.bytes_read
+ if size > 0:
+ self.rfile.read(size)
+
+ if "date" not in hkeys:
+ self.outheaders.append(("Date", rfc822.formatdate()))
+
+ if "server" not in hkeys:
+ self.outheaders.append(("Server", self.environ['SERVER_SOFTWARE']))
+
+ buf = [self.environ['ACTUAL_SERVER_PROTOCOL'], " ", self.status, "\r\n"]
+ try:
+ buf += [k + ": " + v + "\r\n" for k, v in self.outheaders]
+ except TypeError:
+ if not isinstance(k, str):
+ raise TypeError("WSGI response header key %r is not a string." % k)
+ if not isinstance(v, str):
+ raise TypeError("WSGI response header value %r is not a string." % v)
+ else:
+ raise
+ buf.append("\r\n")
+ self.wfile.sendall("".join(buf))
+
+
+class NoSSLError(Exception):
+ """Exception raised when a client speaks HTTP to an HTTPS socket."""
+ pass
+
+
+class FatalSSLAlert(Exception):
+ """Exception raised when the SSL implementation signals a fatal alert."""
+ pass
+
+
+if not _fileobject_uses_str_type:
+ class CP_fileobject(socket._fileobject):
+ """Faux file object attached to a socket object."""
+
+ def sendall(self, data):
+ """Sendall for non-blocking sockets."""
+ while data:
+ try:
+ bytes_sent = self.send(data)
+ data = data[bytes_sent:]
+ except socket.error, e:
+ if e.args[0] not in socket_errors_nonblocking:
+ raise
+
+ def send(self, data):
+ return self._sock.send(data)
+
+ def flush(self):
+ if self._wbuf:
+ buffer = "".join(self._wbuf)
+ self._wbuf = []
+ self.sendall(buffer)
+
+ def recv(self, size):
+ while True:
+ try:
+ return self._sock.recv(size)
+ except socket.error, e:
+ if (e.args[0] not in socket_errors_nonblocking
+ and e.args[0] not in socket_error_eintr):
+ raise
+
+ def read(self, size=-1):
+ # Use max, disallow tiny reads in a loop as they are very inefficient.
+ # We never leave read() with any leftover data from a new recv() call
+ # in our internal buffer.
+ rbufsize = max(self._rbufsize, self.default_bufsize)
+ # Our use of StringIO rather than lists of string objects returned by
+ # recv() minimizes memory usage and fragmentation that occurs when
+ # rbufsize is large compared to the typical return value of recv().
+ buf = self._rbuf
+ buf.seek(0, 2) # seek end
+ if size < 0:
+ # Read until EOF
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ while True:
+ data = self.recv(rbufsize)
+ if not data:
+ break
+ buf.write(data)
+ return buf.getvalue()
+ else:
+ # Read until size bytes or EOF seen, whichever comes first
+ buf_len = buf.tell()
+ if buf_len >= size:
+ # Already have size bytes in our buffer? Extract and return.
+ buf.seek(0)
+ rv = buf.read(size)
+ self._rbuf = StringIO.StringIO()
+ self._rbuf.write(buf.read())
+ return rv
+
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ while True:
+ left = size - buf_len
+ # recv() will malloc the amount of memory given as its
+ # parameter even though it often returns much less data
+ # than that. The returned data string is short lived
+ # as we copy it into a StringIO and free it. This avoids
+ # fragmentation issues on many platforms.
+ data = self.recv(left)
+ if not data:
+ break
+ n = len(data)
+ if n == size and not buf_len:
+ # Shortcut. Avoid buffer data copies when:
+ # - We have no data in our buffer.
+ # AND
+ # - Our call to recv returned exactly the
+ # number of bytes we were asked to read.
+ return data
+ if n == left:
+ buf.write(data)
+ del data # explicit free
+ break
+ assert n <= left, "recv(%d) returned %d bytes" % (left, n)
+ buf.write(data)
+ buf_len += n
+ del data # explicit free
+ #assert buf_len == buf.tell()
+ return buf.getvalue()
+
+ def readline(self, size=-1):
+ buf = self._rbuf
+ buf.seek(0, 2) # seek end
+ if buf.tell() > 0:
+ # check if we already have it in our buffer
+ buf.seek(0)
+ bline = buf.readline(size)
+ if bline.endswith('\n') or len(bline) == size:
+ self._rbuf = StringIO.StringIO()
+ self._rbuf.write(buf.read())
+ return bline
+ del bline
+ if size < 0:
+ # Read until \n or EOF, whichever comes first
+ if self._rbufsize <= 1:
+ # Speed up unbuffered case
+ buf.seek(0)
+ buffers = [buf.read()]
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ data = None
+ recv = self.recv
+ while data != "\n":
+ data = recv(1)
+ if not data:
+ break
+ buffers.append(data)
+ return "".join(buffers)
+
+ buf.seek(0, 2) # seek end
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ nl = data.find('\n')
+ if nl >= 0:
+ nl += 1
+ buf.write(data[:nl])
+ self._rbuf.write(data[nl:])
+ del data
+ break
+ buf.write(data)
+ return buf.getvalue()
+ else:
+ # Read until size bytes or \n or EOF seen, whichever comes first
+ buf.seek(0, 2) # seek end
+ buf_len = buf.tell()
+ if buf_len >= size:
+ buf.seek(0)
+ rv = buf.read(size)
+ self._rbuf = StringIO.StringIO()
+ self._rbuf.write(buf.read())
+ return rv
+ self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ left = size - buf_len
+ # did we just receive a newline?
+ nl = data.find('\n', 0, left)
+ if nl >= 0:
+ nl += 1
+ # save the excess data to _rbuf
+ self._rbuf.write(data[nl:])
+ if buf_len:
+ buf.write(data[:nl])
+ break
+ else:
+ # Shortcut. Avoid data copy through buf when returning
+ # a substring of our first recv().
+ return data[:nl]
+ n = len(data)
+ if n == size and not buf_len:
+ # Shortcut. Avoid data copy through buf when
+ # returning exactly all of our first recv().
+ return data
+ if n >= left:
+ buf.write(data[:left])
+ self._rbuf.write(data[left:])
+ break
+ buf.write(data)
+ buf_len += n
+ #assert buf_len == buf.tell()
+ return buf.getvalue()
+
+else:
+ class CP_fileobject(socket._fileobject):
+ """Faux file object attached to a socket object."""
+
+ def sendall(self, data):
+ """Sendall for non-blocking sockets."""
+ while data:
+ try:
+ bytes_sent = self.send(data)
+ data = data[bytes_sent:]
+ except socket.error, e:
+ if e.args[0] not in socket_errors_nonblocking:
+ raise
+
+ def send(self, data):
+ return self._sock.send(data)
+
+ def flush(self):
+ if self._wbuf:
+ buffer = "".join(self._wbuf)
+ self._wbuf = []
+ self.sendall(buffer)
+
+ def recv(self, size):
+ while True:
+ try:
+ return self._sock.recv(size)
+ except socket.error, e:
+ if (e.args[0] not in socket_errors_nonblocking
+ and e.args[0] not in socket_error_eintr):
+ raise
+
+ def read(self, size=-1):
+ if size < 0:
+ # Read until EOF
+ buffers = [self._rbuf]
+ self._rbuf = ""
+ if self._rbufsize <= 1:
+ recv_size = self.default_bufsize
+ else:
+ recv_size = self._rbufsize
+
+ while True:
+ data = self.recv(recv_size)
+ if not data:
+ break
+ buffers.append(data)
+ return "".join(buffers)
+ else:
+ # Read until size bytes or EOF seen, whichever comes first
+ data = self._rbuf
+ buf_len = len(data)
+ if buf_len >= size:
+ self._rbuf = data[size:]
+ return data[:size]
+ buffers = []
+ if data:
+ buffers.append(data)
+ self._rbuf = ""
+ while True:
+ left = size - buf_len
+ recv_size = max(self._rbufsize, left)
+ data = self.recv(recv_size)
+ if not data:
+ break
+ buffers.append(data)
+ n = len(data)
+ if n >= left:
+ self._rbuf = data[left:]
+ buffers[-1] = data[:left]
+ break
+ buf_len += n
+ return "".join(buffers)
+
+ def readline(self, size=-1):
+ data = self._rbuf
+ if size < 0:
+ # Read until \n or EOF, whichever comes first
+ if self._rbufsize <= 1:
+ # Speed up unbuffered case
+ assert data == ""
+ buffers = []
+ while data != "\n":
+ data = self.recv(1)
+ if not data:
+ break
+ buffers.append(data)
+ return "".join(buffers)
+ nl = data.find('\n')
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ return data[:nl]
+ buffers = []
+ if data:
+ buffers.append(data)
+ self._rbuf = ""
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ buffers.append(data)
+ nl = data.find('\n')
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ buffers[-1] = data[:nl]
+ break
+ return "".join(buffers)
+ else:
+ # Read until size bytes or \n or EOF seen, whichever comes first
+ nl = data.find('\n', 0, size)
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ return data[:nl]
+ buf_len = len(data)
+ if buf_len >= size:
+ self._rbuf = data[size:]
+ return data[:size]
+ buffers = []
+ if data:
+ buffers.append(data)
+ self._rbuf = ""
+ while True:
+ data = self.recv(self._rbufsize)
+ if not data:
+ break
+ buffers.append(data)
+ left = size - buf_len
+ nl = data.find('\n', 0, left)
+ if nl >= 0:
+ nl += 1
+ self._rbuf = data[nl:]
+ buffers[-1] = data[:nl]
+ break
+ n = len(data)
+ if n >= left:
+ self._rbuf = data[left:]
+ buffers[-1] = data[:left]
+ break
+ buf_len += n
+ return "".join(buffers)
+
+
+class SSL_fileobject(CP_fileobject):
+ """SSL file object attached to a socket object."""
+
+ ssl_timeout = 3
+ ssl_retry = .01
+
+ def _safe_call(self, is_reader, call, *args, **kwargs):
+ """Wrap the given call with SSL error-trapping.
+
+ is_reader: if False EOF errors will be raised. If True, EOF errors
+ will return "" (to emulate normal sockets).
+ """
+ start = time.time()
+ while True:
+ try:
+ return call(*args, **kwargs)
+ except SSL.WantReadError:
+ # Sleep and try again. This is dangerous, because it means
+ # the rest of the stack has no way of differentiating
+ # between a "new handshake" error and "client dropped".
+ # Note this isn't an endless loop: there's a timeout below.
+ time.sleep(self.ssl_retry)
+ except SSL.WantWriteError:
+ time.sleep(self.ssl_retry)
+ except SSL.SysCallError, e:
+ if is_reader and e.args == (-1, 'Unexpected EOF'):
+ return ""
+
+ errnum = e.args[0]
+ if is_reader and errnum in socket_errors_to_ignore:
+ return ""
+ raise socket.error(errnum)
+ except SSL.Error, e:
+ if is_reader and e.args == (-1, 'Unexpected EOF'):
+ return ""
+
+ thirdarg = None
+ try:
+ thirdarg = e.args[0][0][2]
+ except IndexError:
+ pass
+
+ if thirdarg == 'http request':
+ # The client is talking HTTP to an HTTPS server.
+ raise NoSSLError()
+ raise FatalSSLAlert(*e.args)
+ except:
+ raise
+
+ if time.time() - start > self.ssl_timeout:
+ raise socket.timeout("timed out")
+
+ def recv(self, *args, **kwargs):
+ buf = []
+ r = super(SSL_fileobject, self).recv
+ while True:
+ data = self._safe_call(True, r, *args, **kwargs)
+ buf.append(data)
+ p = self._sock.pending()
+ if not p:
+ return "".join(buf)
+
+ def sendall(self, *args, **kwargs):
+ return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs)
+
+ def send(self, *args, **kwargs):
+ return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs)
+
+
+class HTTPConnection(object):
+ """An HTTP connection (active socket).
+
+ socket: the raw socket object (usually TCP) for this connection.
+ wsgi_app: the WSGI application for this server/connection.
+ environ: a WSGI environ template. This will be copied for each request.
+
+ rfile: a fileobject for reading from the socket.
+ send: a function for writing (+ flush) to the socket.
+ """
+
+ rbufsize = -1
+ RequestHandlerClass = HTTPRequest
+ environ = {"wsgi.version": (1, 0),
+ "wsgi.url_scheme": "http",
+ "wsgi.multithread": True,
+ "wsgi.multiprocess": False,
+ "wsgi.run_once": False,
+ "wsgi.errors": sys.stderr,
+ }
+
+ def __init__(self, sock, wsgi_app, environ):
+ self.socket = sock
+ self.wsgi_app = wsgi_app
+
+ # Copy the class environ into self.
+ self.environ = self.environ.copy()
+ self.environ.update(environ)
+
+ if SSL and isinstance(sock, SSL.ConnectionType):
+ timeout = sock.gettimeout()
+ self.rfile = SSL_fileobject(sock, "rb", self.rbufsize)
+ self.rfile.ssl_timeout = timeout
+ self.wfile = SSL_fileobject(sock, "wb", -1)
+ self.wfile.ssl_timeout = timeout
+ else:
+ self.rfile = CP_fileobject(sock, "rb", self.rbufsize)
+ self.wfile = CP_fileobject(sock, "wb", -1)
+
+ # Wrap wsgi.input but not HTTPConnection.rfile itself.
+ # We're also not setting maxlen yet; we'll do that separately
+ # for headers and body for each iteration of self.communicate
+ # (if maxlen is 0 the wrapper doesn't check length).
+ self.environ["wsgi.input"] = SizeCheckWrapper(self.rfile, 0)
+
+ def communicate(self):
+ """Read each request and respond appropriately."""
+ try:
+ while True:
+ # (re)set req to None so that if something goes wrong in
+ # the RequestHandlerClass constructor, the error doesn't
+ # get written to the previous request.
+ req = None
+ req = self.RequestHandlerClass(self.wfile, self.environ,
+ self.wsgi_app)
+
+ # This order of operations should guarantee correct pipelining.
+ req.parse_request()
+ if not req.ready:
+ return
+
+ req.respond()
+ if req.close_connection:
+ return
+
+ except socket.error, e:
+ errnum = e.args[0]
+ if errnum == 'timed out':
+ if req and not req.sent_headers:
+ req.simple_response("408 Request Timeout")
+ elif errnum not in socket_errors_to_ignore:
+ if req and not req.sent_headers:
+ req.simple_response("500 Internal Server Error",
+ format_exc())
+ return
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except FatalSSLAlert, e:
+ # Close the connection.
+ return
+ except NoSSLError:
+ if req and not req.sent_headers:
+ # Unwrap our wfile
+ req.wfile = CP_fileobject(self.socket._sock, "wb", -1)
+ req.simple_response("400 Bad Request",
+ "The client sent a plain HTTP request, but "
+ "this server only speaks HTTPS on this port.")
+ self.linger = True
+ except Exception, e:
+ if req and not req.sent_headers:
+ req.simple_response("500 Internal Server Error", format_exc())
+
+ linger = False
+
+ def close(self):
+ """Close the socket underlying this connection."""
+ self.rfile.close()
+
+ if not self.linger:
+ # Python's socket module does NOT call close on the kernel socket
+ # when you call socket.close(). We do so manually here because we
+ # want this server to send a FIN TCP segment immediately. Note this
+ # must be called *before* calling socket.close(), because the latter
+ # drops its reference to the kernel socket.
+ self.socket._sock.close()
+ self.socket.close()
+ else:
+ # On the other hand, sometimes we want to hang around for a bit
+ # to make sure the client has a chance to read our entire
+ # response. Skipping the close() calls here delays the FIN
+ # packet until the socket object is garbage-collected later.
+ # Someday, perhaps, we'll do the full lingering_close that
+ # Apache does, but not today.
+ pass
+
+
+def format_exc(limit=None):
+ """Like print_exc() but return a string. Backport for Python 2.3."""
+ try:
+ etype, value, tb = sys.exc_info()
+ return ''.join(traceback.format_exception(etype, value, tb, limit))
+ finally:
+ etype = value = tb = None
+
+
+_SHUTDOWNREQUEST = None
+
+class WorkerThread(threading.Thread):
+ """Thread which continuously polls a Queue for Connection objects.
+
+ server: the HTTP Server which spawned this thread, and which owns the
+ Queue and is placing active connections into it.
+ ready: a simple flag for the calling server to know when this thread
+ has begun polling the Queue.
+
+ Due to the timing issues of polling a Queue, a WorkerThread does not
+ check its own 'ready' flag after it has started. To stop the thread,
+ it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
+ (one for each running WorkerThread).
+ """
+
+ conn = None
+
+ def __init__(self, server):
+ self.ready = False
+ self.server = server
+ threading.Thread.__init__(self)
+
+ def run(self):
+ try:
+ self.ready = True
+ while True:
+ conn = self.server.requests.get()
+ if conn is _SHUTDOWNREQUEST:
+ return
+
+ self.conn = conn
+ try:
+ conn.communicate()
+ finally:
+ conn.close()
+ self.conn = None
+ except (KeyboardInterrupt, SystemExit), exc:
+ self.server.interrupt = exc
+
+
+class ThreadPool(object):
+ """A Request Queue for the CherryPyWSGIServer which pools threads.
+
+ ThreadPool objects must provide min, get(), put(obj), start()
+ and stop(timeout) attributes.
+ """
+
+ def __init__(self, server, min=10, max=-1):
+ self.server = server
+ self.min = min
+ self.max = max
+ self._threads = []
+ self._queue = Queue.Queue()
+ self.get = self._queue.get
+
+ def start(self):
+ """Start the pool of threads."""
+ for i in xrange(self.min):
+ self._threads.append(WorkerThread(self.server))
+ for worker in self._threads:
+ worker.setName("CP WSGIServer " + worker.getName())
+ worker.start()
+ for worker in self._threads:
+ while not worker.ready:
+ time.sleep(.1)
+
+ def _get_idle(self):
+ """Number of worker threads which are idle. Read-only."""
+ return len([t for t in self._threads if t.conn is None])
+ idle = property(_get_idle, doc=_get_idle.__doc__)
+
+ def put(self, obj):
+ self._queue.put(obj)
+ if obj is _SHUTDOWNREQUEST:
+ return
+
+ def grow(self, amount):
+ """Spawn new worker threads (not above self.max)."""
+ for i in xrange(amount):
+ if self.max > 0 and len(self._threads) >= self.max:
+ break
+ worker = WorkerThread(self.server)
+ worker.setName("CP WSGIServer " + worker.getName())
+ self._threads.append(worker)
+ worker.start()
+
+ def shrink(self, amount):
+ """Kill off worker threads (not below self.min)."""
+ # Remove any dead threads from our list before working out
+ # how many live workers still need to be told to exit.
+ for t in self._threads:
+ if not t.isAlive():
+ self._threads.remove(t)
+ amount -= 1
+
+ if amount > 0:
+ for i in xrange(min(amount, len(self._threads) - self.min)):
+ # Put up to 'amount' shutdown requests on the queue (never
+ # dropping below self.min live workers). Once a worker
+ # processes one of these, it terminates; dead threads are
+ # removed from self._threads the next time shrink() runs.
+ self._queue.put(_SHUTDOWNREQUEST)
+
+ def stop(self, timeout=5):
+ # Must shut down threads here so the code that calls
+ # this method can know when all threads are stopped.
+ for worker in self._threads:
+ self._queue.put(_SHUTDOWNREQUEST)
+
+ # Don't join currentThread (when stop is called inside a request).
+ current = threading.currentThread()
+ while self._threads:
+ worker = self._threads.pop()
+ if worker is not current and worker.isAlive():
+ try:
+ if timeout is None or timeout < 0:
+ worker.join()
+ else:
+ worker.join(timeout)
+ if worker.isAlive():
+ # We exhausted the timeout.
+ # Forcibly shut down the socket.
+ c = worker.conn
+ if c and not c.rfile.closed:
+ if SSL and isinstance(c.socket, SSL.ConnectionType):
+ # pyOpenSSL.socket.shutdown takes no args
+ c.socket.shutdown()
+ else:
+ c.socket.shutdown(socket.SHUT_RD)
+ worker.join()
+ except (AssertionError,
+ # Ignore repeated Ctrl-C.
+ # See http://www.cherrypy.org/ticket/691.
+ KeyboardInterrupt), exc1:
+ pass
+
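+# Illustrative sketch (not part of upstream CherryPy) of the small interface
+# the ThreadPool docstring above asks of a replacement pool: min, get(),
+# put(obj), start() and stop(timeout).  This variant skips worker threads
+# entirely and services each connection on the thread that accepted it.
+class _SynchronousPoolSketch(object):
+
+    min = 1
+
+    def __init__(self, server):
+        self.server = server
+
+    def start(self):
+        # Nothing to spawn; connections are handled as they are put().
+        pass
+
+    def get(self):
+        # Connections are never queued, so there is never anything to fetch.
+        raise NotImplementedError
+
+    def put(self, conn):
+        if conn is _SHUTDOWNREQUEST:
+            return
+        try:
+            conn.communicate()
+        finally:
+            conn.close()
+
+    def stop(self, timeout=5):
+        # No worker threads to join.
+        pass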
+
+
+class SSLConnection:
+ """A thread-safe wrapper for an SSL.Connection.
+
+ *args: the arguments to create the wrapped SSL.Connection(*args).
+ """
+
+ def __init__(self, *args):
+ self._ssl_conn = SSL.Connection(*args)
+ self._lock = threading.RLock()
+
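+        # Generate one delegating method per name listed below: each
+        # generated method acquires self._lock, forwards the call to the
+        # wrapped SSL.Connection, then releases the lock, which is what
+        # makes this wrapper safe to share between threads.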
+ for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
+ 'renegotiate', 'bind', 'listen', 'connect', 'accept',
+ 'setblocking', 'fileno', 'shutdown', 'close', 'get_cipher_list',
+ 'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
+ 'makefile', 'get_app_data', 'set_app_data', 'state_string',
+ 'sock_shutdown', 'get_peer_certificate', 'want_read',
+ 'want_write', 'set_connect_state', 'set_accept_state',
+ 'connect_ex', 'sendall', 'settimeout'):
+ exec """def %s(self, *args):
+ self._lock.acquire()
+ try:
+ return self._ssl_conn.%s(*args)
+ finally:
+ self._lock.release()
+""" % (f, f)
+
+
+try:
+ import fcntl
+except ImportError:
+ try:
+ from ctypes import windll, WinError
+ except ImportError:
+ def prevent_socket_inheritance(sock):
+ """Dummy function, since neither fcntl nor ctypes are available."""
+ pass
+ else:
+ def prevent_socket_inheritance(sock):
+ """Mark the given socket fd as non-inheritable (Windows)."""
+ if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
+ raise WinError()
+else:
+ def prevent_socket_inheritance(sock):
+ """Mark the given socket fd as non-inheritable (POSIX)."""
+ fd = sock.fileno()
+ old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
+
+
+class CherryPyWSGIServer(object):
+ """An HTTP server for WSGI.
+
+ bind_addr: The interface on which to listen for connections.
+ For TCP sockets, a (host, port) tuple. Host values may be any IPv4
+ or IPv6 address, or any valid hostname. The string 'localhost' is a
+ synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
+ The string '0.0.0.0' is a special IPv4 entry meaning "any active
+ interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
+ IPv6. The empty string or None are not allowed.
+
+ For UNIX sockets, supply the filename as a string.
+ wsgi_app: the WSGI 'application callable'; multiple WSGI applications
+ may be passed as (path_prefix, app) pairs.
+ numthreads: the number of worker threads to create (default 10).
+ server_name: the string to set for WSGI's SERVER_NAME environ entry.
+ Defaults to socket.gethostname().
+ max: the maximum number of queued requests (defaults to -1 = no limit).
+ request_queue_size: the 'backlog' argument to socket.listen();
+ specifies the maximum number of queued connections (default 5).
+ timeout: the timeout in seconds for accepted connections (default 10).
+
+ nodelay: if True (the default since 3.1), sets the TCP_NODELAY socket
+ option.
+
+ protocol: the version string to write in the Status-Line of all
+ HTTP responses. For example, "HTTP/1.1" (the default). This
+ also limits the supported features used in the response.
+
+
+ SSL/HTTPS
+ ---------
+ The OpenSSL module must be importable for SSL functionality.
+ You can obtain it from http://pyopenssl.sourceforge.net/
+
+ ssl_certificate: the filename of the server SSL certificate.
+ ssl_private_key: the filename of the server's private key file.
+
+ If either of these is None (both are None by default), this server
+ will not use SSL. If both are given and are valid, they will be read
+ on server start and used in the SSL context for the listening socket.
+ """
+
+ protocol = "HTTP/1.1"
+ _bind_addr = "127.0.0.1"
+ version = "CherryPy/3.1.2"
+ ready = False
+ _interrupt = None
+
+ nodelay = True
+
+ ConnectionClass = HTTPConnection
+ environ = {}
+
+ # Paths to certificate and private key files
+ ssl_certificate = None
+ ssl_private_key = None
+
+ def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
+ max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
+ self.requests = ThreadPool(self, min=numthreads or 1, max=max)
+
+ if callable(wsgi_app):
+ # We've been handed a single wsgi_app, in CP-2.1 style.
+ # Assume it's mounted at "".
+ self.wsgi_app = wsgi_app
+ else:
+ # We've been handed a list of (path_prefix, wsgi_app) tuples,
+ # so that the server can call different wsgi_apps, and also
+ # correctly set SCRIPT_NAME.
+ warnings.warn("The ability to pass multiple apps is deprecated "
+ "and will be removed in 3.2. You should explicitly "
+ "include a WSGIPathInfoDispatcher instead.",
+ DeprecationWarning)
+ self.wsgi_app = WSGIPathInfoDispatcher(wsgi_app)
+
+ self.bind_addr = bind_addr
+ if not server_name:
+ server_name = socket.gethostname()
+ self.server_name = server_name
+ self.request_queue_size = request_queue_size
+
+ self.timeout = timeout
+ self.shutdown_timeout = shutdown_timeout
+
+ def _get_numthreads(self):
+ return self.requests.min
+ def _set_numthreads(self, value):
+ self.requests.min = value
+ numthreads = property(_get_numthreads, _set_numthreads)
+
+ def __str__(self):
+ return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
+ self.bind_addr)
+
+ def _get_bind_addr(self):
+ return self._bind_addr
+ def _set_bind_addr(self, value):
+ if isinstance(value, tuple) and value[0] in ('', None):
+ # Despite the socket module docs, using '' does not
+ # allow AI_PASSIVE to work. Passing None instead
+ # returns '0.0.0.0' like we want. In other words:
+ # host AI_PASSIVE result
+ # '' Y 192.168.x.y
+ # '' N 192.168.x.y
+ # None Y 0.0.0.0
+ # None N 127.0.0.1
+ # But since you can get the same effect with an explicit
+ # '0.0.0.0', we deny both the empty string and None as values.
+ raise ValueError("Host values of '' or None are not allowed. "
+ "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
+ "to listen on all active interfaces.")
+ self._bind_addr = value
+ bind_addr = property(_get_bind_addr, _set_bind_addr,
+ doc="""The interface on which to listen for connections.
+
+ For TCP sockets, a (host, port) tuple. Host values may be any IPv4
+ or IPv6 address, or any valid hostname. The string 'localhost' is a
+ synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
+ The string '0.0.0.0' is a special IPv4 entry meaning "any active
+ interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
+ IPv6. The empty string or None are not allowed.
+
+ For UNIX sockets, supply the filename as a string.""")
+
+ def start(self):
+ """Run the server forever."""
+ # We don't have to trap KeyboardInterrupt or SystemExit here,
+ # because cherrypy.server already does so, calling self.stop() for us.
+ # If you're using this server with another framework, you should
+ # trap those exceptions in whatever code block calls start().
+ self._interrupt = None
+
+ # Select the appropriate socket
+ if isinstance(self.bind_addr, basestring):
+ # AF_UNIX socket
+
+ # So we can reuse the socket...
+ try: os.unlink(self.bind_addr)
+ except: pass
+
+ # So everyone can access the socket...
+ try: os.chmod(self.bind_addr, 0777)
+ except: pass
+
+ info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
+ else:
+ # AF_INET or AF_INET6 socket
+ # Get the correct address family for our host (allows IPv6 addresses)
+ host, port = self.bind_addr
+ try:
+ info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+ socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
+ except socket.gaierror:
+ # Probably a DNS issue. Assume IPv4.
+ info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)]
+
+ self.socket = None
+ msg = "No socket could be created"
+ for res in info:
+ af, socktype, proto, canonname, sa = res
+ try:
+ self.bind(af, socktype, proto)
+ except socket.error, msg:
+ if self.socket:
+ self.socket.close()
+ self.socket = None
+ continue
+ break
+ if not self.socket:
+ raise socket.error, msg
+
+ # Timeout so KeyboardInterrupt can be caught on Win32
+ self.socket.settimeout(1)
+ self.socket.listen(self.request_queue_size)
+
+ # Create worker threads
+ self.requests.start()
+
+ self.ready = True
+ while self.ready:
+ self.tick()
+ if self.interrupt:
+ while self.interrupt is True:
+ # Wait for self.stop() to complete. See _set_interrupt.
+ time.sleep(0.1)
+ if self.interrupt:
+ raise self.interrupt
+
+ def bind(self, family, type, proto=0):
+ """Create (or recreate) the actual socket object."""
+ self.socket = socket.socket(family, type, proto)
+ prevent_socket_inheritance(self.socket)
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ if self.nodelay:
+ self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ if self.ssl_certificate and self.ssl_private_key:
+ if SSL is None:
+ raise ImportError("You must install pyOpenSSL to use HTTPS.")
+
+ # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
+ ctx = SSL.Context(SSL.SSLv23_METHOD)
+ ctx.use_privatekey_file(self.ssl_private_key)
+ ctx.use_certificate_file(self.ssl_certificate)
+ self.socket = SSLConnection(ctx, self.socket)
+ self.populate_ssl_environ()
+
+ # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
+ # activate dual-stack. See http://www.cherrypy.org/ticket/871.
+ if (not isinstance(self.bind_addr, basestring)
+ and self.bind_addr[0] == '::' and family == socket.AF_INET6):
+ try:
+ self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
+ except (AttributeError, socket.error):
+ # Apparently, the socket option is not available in
+ # this machine's TCP stack
+ pass
+
+ self.socket.bind(self.bind_addr)
+
+ def tick(self):
+ """Accept a new connection and put it on the Queue."""
+ try:
+ s, addr = self.socket.accept()
+ prevent_socket_inheritance(s)
+ if not self.ready:
+ return
+ if hasattr(s, 'settimeout'):
+ s.settimeout(self.timeout)
+
+ environ = self.environ.copy()
+ # SERVER_SOFTWARE is common for IIS. It's also helpful for
+ # us to pass a default value for the "Server" response header.
+ if environ.get("SERVER_SOFTWARE") is None:
+ environ["SERVER_SOFTWARE"] = "%s WSGI Server" % self.version
+ # set a non-standard environ entry so the WSGI app can know what
+ # the *real* server protocol is (and what features to support).
+ # See http://www.faqs.org/rfcs/rfc2145.html.
+ environ["ACTUAL_SERVER_PROTOCOL"] = self.protocol
+ environ["SERVER_NAME"] = self.server_name
+
+ if isinstance(self.bind_addr, basestring):
+ # AF_UNIX. This isn't really allowed by WSGI, which doesn't
+ # address unix domain sockets. But it's better than nothing.
+ environ["SERVER_PORT"] = ""
+ else:
+ environ["SERVER_PORT"] = str(self.bind_addr[1])
+ # optional values
+ # Until we do DNS lookups, omit REMOTE_HOST
+ environ["REMOTE_ADDR"] = addr[0]
+ environ["REMOTE_PORT"] = str(addr[1])
+
+ conn = self.ConnectionClass(s, self.wsgi_app, environ)
+ self.requests.put(conn)
+ except socket.timeout:
+ # The only reason for the timeout in start() is so we can
+ # notice keyboard interrupts on Win32, which don't interrupt
+ # accept() by default
+ return
+ except socket.error, x:
+ if x.args[0] in socket_error_eintr:
+ # I *think* this is right. EINTR should occur when a signal
+ # is received during the accept() call; all docs say retry
+ # the call, and I *think* I'm reading it right that Python
+ # will then go ahead and poll for and handle the signal
+ # elsewhere. See http://www.cherrypy.org/ticket/707.
+ return
+ if x.args[0] in socket_errors_nonblocking:
+ # Just try again. See http://www.cherrypy.org/ticket/479.
+ return
+ if x.args[0] in socket_errors_to_ignore:
+ # Our socket was closed.
+ # See http://www.cherrypy.org/ticket/686.
+ return
+ raise
+
+ def _get_interrupt(self):
+ return self._interrupt
+ def _set_interrupt(self, interrupt):
+ self._interrupt = True
+ self.stop()
+ self._interrupt = interrupt
+ interrupt = property(_get_interrupt, _set_interrupt,
+ doc="Set this to an Exception instance to "
+ "interrupt the server.")
+
+ def stop(self):
+ """Gracefully shutdown a server that is serving forever."""
+ self.ready = False
+
+ sock = getattr(self, "socket", None)
+ if sock:
+ if not isinstance(self.bind_addr, basestring):
+ # Touch our own socket to make accept() return immediately.
+ try:
+ host, port = sock.getsockname()[:2]
+ except socket.error, x:
+ if x.args[0] not in socket_errors_to_ignore:
+ raise
+ else:
+ # Note that we're explicitly NOT using AI_PASSIVE here,
+ # because we want an actual IP to touch.
+ # localhost won't work if we've bound to a public IP,
+ # but it will if we bound to '0.0.0.0' (INADDR_ANY).
+ for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+ socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ s = None
+ try:
+ s = socket.socket(af, socktype, proto)
+ # See http://groups.google.com/group/cherrypy-users/
+ # browse_frm/thread/bbfe5eb39c904fe0
+ s.settimeout(1.0)
+ s.connect((host, port))
+ s.close()
+ except socket.error:
+ if s:
+ s.close()
+ if hasattr(sock, "close"):
+ sock.close()
+ self.socket = None
+
+ self.requests.stop(self.shutdown_timeout)
+
+ def populate_ssl_environ(self):
+ """Create WSGI environ entries to be merged into each request."""
+ cert = open(self.ssl_certificate, 'rb').read()
+ cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
+ ssl_environ = {
+ "wsgi.url_scheme": "https",
+ "HTTPS": "on",
+ # pyOpenSSL doesn't provide access to any of these AFAICT
+## 'SSL_PROTOCOL': 'SSLv2',
+## SSL_CIPHER string The cipher specification name
+## SSL_VERSION_INTERFACE string The mod_ssl program version
+## SSL_VERSION_LIBRARY string The OpenSSL program version
+ }
+
+ # Server certificate attributes
+ ssl_environ.update({
+ 'SSL_SERVER_M_VERSION': cert.get_version(),
+ 'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
+## 'SSL_SERVER_V_START': Validity of server's certificate (start time),
+## 'SSL_SERVER_V_END': Validity of server's certificate (end time),
+ })
+
+ for prefix, dn in [("I", cert.get_issuer()),
+ ("S", cert.get_subject())]:
+ # X509Name objects don't seem to have a way to get the
+ # complete DN string. Use str() and slice it instead,
+ # because str(dn) == "<X509Name object '/C=US/ST=...'>"
+ dnstr = str(dn)[18:-2]
+
+ wsgikey = 'SSL_SERVER_%s_DN' % prefix
+ ssl_environ[wsgikey] = dnstr
+
+ # The DN should be of the form: /k1=v1/k2=v2, but we must allow
+ # for any value to contain slashes itself (in a URL).
+ while dnstr:
+ pos = dnstr.rfind("=")
+ dnstr, value = dnstr[:pos], dnstr[pos + 1:]
+ pos = dnstr.rfind("/")
+ dnstr, key = dnstr[:pos], dnstr[pos + 1:]
+ if key and value:
+ wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
+ ssl_environ[wsgikey] = value
+
+ self.environ.update(ssl_environ)
+
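+# Illustrative usage sketch (not part of upstream CherryPy/web.py): a minimal
+# way to drive CherryPyWSGIServer directly with a trivial WSGI application
+# when this module is run as a script.  The host, port and demo app are
+# placeholders chosen for the example.
+if __name__ == '__main__':
+    def _demo_app(environ, start_response):
+        """A throwaway WSGI app used only for this demonstration."""
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return ['Hello from CherryPyWSGIServer\n']
+
+    _demo_server = CherryPyWSGIServer(('127.0.0.1', 8080), _demo_app,
+                                      server_name='localhost', numthreads=10)
+    try:
+        _demo_server.start()    # blocks, serving requests until interrupted
+    except KeyboardInterrupt:
+        _demo_server.stop()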
--- a/review/api.py Wed Oct 21 19:31:15 2009 -0400
+++ b/review/api.py Wed Oct 21 19:32:14 2009 -0400
@@ -3,7 +3,7 @@
"""The API for interacting with code review data."""
import datetime, operator, os
-import messages, templates
+import file_templates, messages
from mercurial import cmdutil, error, hg, patch, util
from mercurial.node import hex
@@ -560,7 +560,7 @@
"""
rendered_date = util.datestr(self.hgdate)
lines = ','.join(self.lines)
- return templates.COMMENT_FILE_TEMPLATE % ( self.author, rendered_date,
+ return file_templates.COMMENT_FILE_TEMPLATE % ( self.author, rendered_date,
self.node, self.filename, lines, self.message )
def __str__(self):
@@ -631,6 +631,6 @@
"""
rendered_date = util.datestr(self.hgdate)
- return templates.SIGNOFF_FILE_TEMPLATE % ( self.author, rendered_date,
+ return file_templates.SIGNOFF_FILE_TEMPLATE % ( self.author, rendered_date,
self.node, self.opinion, self.message )
--- a/review/extension_ui.py Wed Oct 21 19:31:15 2009 -0400
+++ b/review/extension_ui.py Wed Oct 21 19:32:14 2009 -0400
@@ -11,6 +11,11 @@
from mercurial import help, templatefilters, util
from mercurial.node import short
+def _web_command(ui, repo, **opts):
+ ui.note(messages.WEB_START)
+
+ import web_ui
+ web_ui.load_interface(ui, repo)
def _init_command(ui, repo, **opts):
ui.note(messages.INIT_START)
@@ -227,7 +232,9 @@
comments and signoffs so other people can view them.
"""
- if opts.pop('init'):
+ if opts.pop('web'):
+ return _web_command(ui, repo, **opts)
+ elif opts.pop('init'):
return _init_command(ui, repo, **opts)
elif opts.pop('comment'):
return _comment_command(ui, repo, *fnames, **opts)
@@ -251,6 +258,7 @@
('r', 'rev', '.', 'the revision to review'),
('l', 'lines', '', 'the line(s) of the file to comment on'),
('U', 'unified', '5', 'number of lines of context to show'),
+ ('w', 'web', False, 'launch the web interface'),
],
'hg review')
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/file_templates.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,18 @@
+"""Templates for hg-review's data files."""
+
+COMMENT_FILE_TEMPLATE = """\
+author:%s
+hgdate:%s
+node:%s
+filename:%s
+lines:%s
+
+%s"""
+
+SIGNOFF_FILE_TEMPLATE = """\
+author:%s
+hgdate:%s
+node:%s
+opinion:%s
+
+%s"""
\ No newline at end of file
--- a/review/messages.py Wed Oct 21 19:31:15 2009 -0400
+++ b/review/messages.py Wed Oct 21 19:32:14 2009 -0400
@@ -102,4 +102,8 @@
COMMIT_COMMENT = """Add a comment on changeset %s"""
COMMIT_SIGNOFF = """Sign off on changeset %s"""
-DELETE_SIGNOFF = """Remove sign off on changeset %s"""
\ No newline at end of file
+DELETE_SIGNOFF = """Remove sign off on changeset %s"""
+
+WEB_START = """\
+starting CherryPy web server
+"""
\ No newline at end of file
--- a/review/templates.py Wed Oct 21 19:31:15 2009 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-"""Templates for hg-review's data files."""
-
-COMMENT_FILE_TEMPLATE = """\
-author:%s
-hgdate:%s
-node:%s
-filename:%s
-lines:%s
-
-%s"""
-
-SIGNOFF_FILE_TEMPLATE = """\
-author:%s
-hgdate:%s
-node:%s
-opinion:%s
-
-%s"""
\ No newline at end of file
--- a/review/tests/util.py Wed Oct 21 19:31:15 2009 -0400
+++ b/review/tests/util.py Wed Oct 21 19:32:14 2009 -0400
@@ -9,7 +9,7 @@
_ui = ui.ui()
def review(init=False, comment=False, signoff=False, yes=False, no=False,
force=False, message='', rev='.', local_path='', remote_path='', lines='',
- files=None, unified='5'):
+ files=None, unified='5', web=False):
files = files if files else []
@@ -17,7 +17,7 @@
extension_ui.review(_ui, get_sandbox_repo(), *files,
init=init, comment=comment, signoff=signoff, yes=yes, no=no,
force=force, message=message, rev=rev, local_path=local_path,
- remote_path=remote_path, lines=lines, unified=unified )
+ remote_path=remote_path, lines=lines, unified=unified, web=web)
output = _ui.popbuffer()
print output
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_media/aal.css Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,99 @@
+/*
+ aardvark.legs by Anatoli Papirovski - http://fecklessmind.com/
+ Licensed under the MIT license. http://www.opensource.org/licenses/mit-license.php
+*/
+
+/*
+ Reset first. Modified version of Eric Meyer and Paul Chaplin reset
+ from http://meyerweb.com/eric/tools/css/reset/
+*/
+html, body, div, span, applet, object, iframe,
+h1, h2, h3, h4, h5, h6, p, blockquote, pre,
+a, abbr, acronym, address, big, cite, code,
+del, dfn, em, font, img, ins, kbd, q, s, samp,
+small, strike, strong, sub, sup, tt, var,
+b, u, i, center,
+dl, dt, dd, ol, ul, li,
+fieldset, form, label, legend,
+table, caption, tbody, tfoot, thead, tr, th, td,
+header, nav, section, article, aside, footer
+{border: 0; margin: 0; outline: 0; padding: 0; background: transparent; vertical-align: baseline;}
+
+blockquote, q {quotes: none;}
+blockquote:before,blockquote:after,q:before,q:after {content: ''; content: none;}
+
+header, nav, section, article, aside, footer {display: block;}
+
+/* Basic styles */
+body {background: #fff; color: #111; font: 0.75em/1.5em "Helvetica Neue", Helvetica, Arial, "Liberation Sans", "Bitstream Vera Sans", sans-serif;}
+html>body {font-size: 12px;}
+
+img {display: inline-block; vertical-align: bottom;}
+
+h1,h2,h3,h4,h5,h6,strong,b,dt,th {font-weight: 700;}
+address,cite,em,i,caption,dfn,var {font-style: italic;}
+
+h1 {margin: 0 0 0.75em; font-size: 2em;}
+h2 {margin: 0 0 1em; font-size: 1.5em;}
+h3 {margin: 0 0 1.286em; font-size: 1.167em;}
+h4 {margin: 0 0 1.5em; font-size: 1em;}
+h5 {margin: 0 0 1.8em; font-size: .834em;}
+h6 {margin: 0 0 2em; font-size: .75em;}
+
+p,ul,ol,dl,blockquote,pre {margin: 0 0 1.5em;}
+
+li ul,li ol {margin: 0;}
+ul {list-style: outside disc;}
+ol {list-style: outside decimal;}
+li {margin: 0 0 0 2em;}
+dd {padding-left: 1.5em;}
+blockquote {padding: 0 1.5em;}
+
+a {text-decoration: underline;}
+a:hover {text-decoration: none;}
+abbr,acronym {border-bottom: 1px dotted; cursor: help;}
+del {text-decoration: line-through;}
+ins {text-decoration: overline;}
+sub {font-size: .834em; line-height: 1em; vertical-align: sub;}
+sup {font-size: .834em; line-height: 1em; vertical-align: super;}
+
+tt,code,kbd,samp,pre {font-size: 1em; font-family: Consolas, Monaco, "Courier New", Courier, monospace;}
+
+/* Table styles */
+table {border-collapse: collapse; border-spacing: 0; margin: 0 0 1.5em;}
+caption {text-align: left;}
+th, td {padding: .25em .5em;}
+tbody td, tbody th {border: 1px solid #000;}
+tfoot {font-style: italic;}
+
+/* Form styles */
+fieldset {clear: both;}
+legend {padding: 0 0 1.286em; font-size: 1.167em; font-weight: 700;}
+fieldset fieldset legend {padding: 0 0 1.5em; font-size: 1em;}
+* html legend {margin-left: -7px;}
+*+html legend {margin-left: -7px;}
+
+form .field, form .buttons {clear: both; margin: 0 0 1.5em;}
+form .field label {display: block;}
+form ul.fields li {list-style-type: none; margin: 0;}
+form ul.inline li, form ul.inline label {display: inline;}
+form ul.inline li {padding: 0 .75em 0 0;}
+
+input.radio, input.checkbox {vertical-align: top;}
+label, button, input.submit, input.image {cursor: pointer;}
+* html input.radio, * html input.checkbox {vertical-align: middle;}
+*+html input.radio, *+html input.checkbox {vertical-align: middle;}
+
+textarea {overflow: auto;}
+input.text, input.password, textarea, select {margin: 0; font: 1em/1.3 Helvetica, Arial, "Liberation Sans", "Bitstream Vera Sans", sans-serif; vertical-align: baseline;}
+input.text, input.password, textarea {border: 1px solid #444; border-bottom-color: #666; border-right-color: #666; padding: 2px;}
+
+* html button {margin: 0 .34em 0 0;}
+*+html button {margin: 0 .34em 0 0;}
+
+form.horizontal .field {padding-left: 150px;}
+form.horizontal .field label {display: inline; float: left; width: 140px; margin-left: -150px;}
+
+/* Useful classes */
+img.left {display: inline; float: left; margin: 0 1.5em .75em 0;}
+img.right {display: inline; float: right; margin: 0 0 .75em .75em;}
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_media/comments.js Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,11 @@
+$(function() {
+
+ $("form").hide();
+
+ $("p.comment-activate a").click(function(event) {
+ $(event.target).hide();
+ $(event.target).closest("div").children("form").fadeIn("fast");
+ return false;
+ });
+
+});
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_media/jquery-1.3.2.min.js Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,19 @@
+/*
+ * jQuery JavaScript Library v1.3.2
+ * http://jquery.com/
+ *
+ * Copyright (c) 2009 John Resig
+ * Dual licensed under the MIT and GPL licenses.
+ * http://docs.jquery.com/License
+ *
+ * Date: 2009-02-19 17:34:21 -0500 (Thu, 19 Feb 2009)
+ * Revision: 6246
+ */
+(function(){var l=this,g,y=l.jQuery,p=l.$,o=l.jQuery=l.$=function(E,F){return new o.fn.init(E,F)},D=/^[^<]*(<(.|\s)+>)[^>]*$|^#([\w-]+)$/,f=/^.[^:#\[\.,]*$/;o.fn=o.prototype={init:function(E,H){E=E||document;if(E.nodeType){this[0]=E;this.length=1;this.context=E;return this}if(typeof E==="string"){var G=D.exec(E);if(G&&(G[1]||!H)){if(G[1]){E=o.clean([G[1]],H)}else{var I=document.getElementById(G[3]);if(I&&I.id!=G[3]){return o().find(E)}var F=o(I||[]);F.context=document;F.selector=E;return F}}else{return o(H).find(E)}}else{if(o.isFunction(E)){return o(document).ready(E)}}if(E.selector&&E.context){this.selector=E.selector;this.context=E.context}return this.setArray(o.isArray(E)?E:o.makeArray(E))},selector:"",jquery:"1.3.2",size:function(){return this.length},get:function(E){return E===g?Array.prototype.slice.call(this):this[E]},pushStack:function(F,H,E){var G=o(F);G.prevObject=this;G.context=this.context;if(H==="find"){G.selector=this.selector+(this.selector?" ":"")+E}else{if(H){G.selector=this.selector+"."+H+"("+E+")"}}return G},setArray:function(E){this.length=0;Array.prototype.push.apply(this,E);return this},each:function(F,E){return o.each(this,F,E)},index:function(E){return o.inArray(E&&E.jquery?E[0]:E,this)},attr:function(F,H,G){var E=F;if(typeof F==="string"){if(H===g){return this[0]&&o[G||"attr"](this[0],F)}else{E={};E[F]=H}}return this.each(function(I){for(F in E){o.attr(G?this.style:this,F,o.prop(this,E[F],G,I,F))}})},css:function(E,F){if((E=="width"||E=="height")&&parseFloat(F)<0){F=g}return this.attr(E,F,"curCSS")},text:function(F){if(typeof F!=="object"&&F!=null){return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(F))}var E="";o.each(F||this,function(){o.each(this.childNodes,function(){if(this.nodeType!=8){E+=this.nodeType!=1?this.nodeValue:o.fn.text([this])}})});return E},wrapAll:function(E){if(this[0]){var F=o(E,this[0].ownerDocument).clone();if(this[0].parentNode){F.insertBefore(this[0])}F.map(function(){var G=this;while(G.firstChild){G=G.firstChild}return G}).append(this)}return this},wrapInner:function(E){return this.each(function(){o(this).contents().wrapAll(E)})},wrap:function(E){return this.each(function(){o(this).wrapAll(E)})},append:function(){return this.domManip(arguments,true,function(E){if(this.nodeType==1){this.appendChild(E)}})},prepend:function(){return this.domManip(arguments,true,function(E){if(this.nodeType==1){this.insertBefore(E,this.firstChild)}})},before:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this)})},after:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this.nextSibling)})},end:function(){return this.prevObject||o([])},push:[].push,sort:[].sort,splice:[].splice,find:function(E){if(this.length===1){var F=this.pushStack([],"find",E);F.length=0;o.find(E,this[0],F);return F}else{return this.pushStack(o.unique(o.map(this,function(G){return o.find(E,G)})),"find",E)}},clone:function(G){var E=this.map(function(){if(!o.support.noCloneEvent&&!o.isXMLDoc(this)){var I=this.outerHTML;if(!I){var J=this.ownerDocument.createElement("div");J.appendChild(this.cloneNode(true));I=J.innerHTML}return o.clean([I.replace(/ jQuery\d+="(?:\d+|null)"/g,"").replace(/^\s*/,"")])[0]}else{return this.cloneNode(true)}});if(G===true){var H=this.find("*").andSelf(),F=0;E.find("*").andSelf().each(function(){if(this.nodeName!==H[F].nodeName){return}var I=o.data(H[F],"events");for(var K in I){for(var J in I[K]){o.event.add(this,K,I[K][J],I[K][J].data)}}F++})}return 
E},filter:function(E){return this.pushStack(o.isFunction(E)&&o.grep(this,function(G,F){return E.call(G,F)})||o.multiFilter(E,o.grep(this,function(F){return F.nodeType===1})),"filter",E)},closest:function(E){var G=o.expr.match.POS.test(E)?o(E):null,F=0;return this.map(function(){var H=this;while(H&&H.ownerDocument){if(G?G.index(H)>-1:o(H).is(E)){o.data(H,"closest",F);return H}H=H.parentNode;F++}})},not:function(E){if(typeof E==="string"){if(f.test(E)){return this.pushStack(o.multiFilter(E,this,true),"not",E)}else{E=o.multiFilter(E,this)}}var F=E.length&&E[E.length-1]!==g&&!E.nodeType;return this.filter(function(){return F?o.inArray(this,E)<0:this!=E})},add:function(E){return this.pushStack(o.unique(o.merge(this.get(),typeof E==="string"?o(E):o.makeArray(E))))},is:function(E){return !!E&&o.multiFilter(E,this).length>0},hasClass:function(E){return !!E&&this.is("."+E)},val:function(K){if(K===g){var E=this[0];if(E){if(o.nodeName(E,"option")){return(E.attributes.value||{}).specified?E.value:E.text}if(o.nodeName(E,"select")){var I=E.selectedIndex,L=[],M=E.options,H=E.type=="select-one";if(I<0){return null}for(var F=H?I:0,J=H?I+1:M.length;F<J;F++){var G=M[F];if(G.selected){K=o(G).val();if(H){return K}L.push(K)}}return L}return(E.value||"").replace(/\r/g,"")}return g}if(typeof K==="number"){K+=""}return this.each(function(){if(this.nodeType!=1){return}if(o.isArray(K)&&/radio|checkbox/.test(this.type)){this.checked=(o.inArray(this.value,K)>=0||o.inArray(this.name,K)>=0)}else{if(o.nodeName(this,"select")){var N=o.makeArray(K);o("option",this).each(function(){this.selected=(o.inArray(this.value,N)>=0||o.inArray(this.text,N)>=0)});if(!N.length){this.selectedIndex=-1}}else{this.value=K}}})},html:function(E){return E===g?(this[0]?this[0].innerHTML.replace(/ jQuery\d+="(?:\d+|null)"/g,""):null):this.empty().append(E)},replaceWith:function(E){return this.after(E).remove()},eq:function(E){return this.slice(E,+E+1)},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments),"slice",Array.prototype.slice.call(arguments).join(","))},map:function(E){return this.pushStack(o.map(this,function(G,F){return E.call(G,F,G)}))},andSelf:function(){return this.add(this.prevObject)},domManip:function(J,M,L){if(this[0]){var I=(this[0].ownerDocument||this[0]).createDocumentFragment(),F=o.clean(J,(this[0].ownerDocument||this[0]),I),H=I.firstChild;if(H){for(var G=0,E=this.length;G<E;G++){L.call(K(this[G],H),this.length>1||G>0?I.cloneNode(true):I)}}if(F){o.each(F,z)}}return this;function K(N,O){return M&&o.nodeName(N,"table")&&o.nodeName(O,"tr")?(N.getElementsByTagName("tbody")[0]||N.appendChild(N.ownerDocument.createElement("tbody"))):N}}};o.fn.init.prototype=o.fn;function z(E,F){if(F.src){o.ajax({url:F.src,async:false,dataType:"script"})}else{o.globalEval(F.text||F.textContent||F.innerHTML||"")}if(F.parentNode){F.parentNode.removeChild(F)}}function e(){return +new Date}o.extend=o.fn.extend=function(){var J=arguments[0]||{},H=1,I=arguments.length,E=false,G;if(typeof J==="boolean"){E=J;J=arguments[1]||{};H=2}if(typeof J!=="object"&&!o.isFunction(J)){J={}}if(I==H){J=this;--H}for(;H<I;H++){if((G=arguments[H])!=null){for(var F in G){var K=J[F],L=G[F];if(J===L){continue}if(E&&L&&typeof L==="object"&&!L.nodeType){J[F]=o.extend(E,K||(L.length!=null?[]:{}),L)}else{if(L!==g){J[F]=L}}}}}return J};var b=/z-?index|font-?weight|opacity|zoom|line-?height/i,q=document.defaultView||{},s=Object.prototype.toString;o.extend({noConflict:function(E){l.$=p;if(E){l.jQuery=y}return o},isFunction:function(E){return 
s.call(E)==="[object Function]"},isArray:function(E){return s.call(E)==="[object Array]"},isXMLDoc:function(E){return E.nodeType===9&&E.documentElement.nodeName!=="HTML"||!!E.ownerDocument&&o.isXMLDoc(E.ownerDocument)},globalEval:function(G){if(G&&/\S/.test(G)){var F=document.getElementsByTagName("head")[0]||document.documentElement,E=document.createElement("script");E.type="text/javascript";if(o.support.scriptEval){E.appendChild(document.createTextNode(G))}else{E.text=G}F.insertBefore(E,F.firstChild);F.removeChild(E)}},nodeName:function(F,E){return F.nodeName&&F.nodeName.toUpperCase()==E.toUpperCase()},each:function(G,K,F){var E,H=0,I=G.length;if(F){if(I===g){for(E in G){if(K.apply(G[E],F)===false){break}}}else{for(;H<I;){if(K.apply(G[H++],F)===false){break}}}}else{if(I===g){for(E in G){if(K.call(G[E],E,G[E])===false){break}}}else{for(var J=G[0];H<I&&K.call(J,H,J)!==false;J=G[++H]){}}}return G},prop:function(H,I,G,F,E){if(o.isFunction(I)){I=I.call(H,F)}return typeof I==="number"&&G=="curCSS"&&!b.test(E)?I+"px":I},className:{add:function(E,F){o.each((F||"").split(/\s+/),function(G,H){if(E.nodeType==1&&!o.className.has(E.className,H)){E.className+=(E.className?" ":"")+H}})},remove:function(E,F){if(E.nodeType==1){E.className=F!==g?o.grep(E.className.split(/\s+/),function(G){return !o.className.has(F,G)}).join(" "):""}},has:function(F,E){return F&&o.inArray(E,(F.className||F).toString().split(/\s+/))>-1}},swap:function(H,G,I){var E={};for(var F in G){E[F]=H.style[F];H.style[F]=G[F]}I.call(H);for(var F in G){H.style[F]=E[F]}},css:function(H,F,J,E){if(F=="width"||F=="height"){var L,G={position:"absolute",visibility:"hidden",display:"block"},K=F=="width"?["Left","Right"]:["Top","Bottom"];function I(){L=F=="width"?H.offsetWidth:H.offsetHeight;if(E==="border"){return}o.each(K,function(){if(!E){L-=parseFloat(o.curCSS(H,"padding"+this,true))||0}if(E==="margin"){L+=parseFloat(o.curCSS(H,"margin"+this,true))||0}else{L-=parseFloat(o.curCSS(H,"border"+this+"Width",true))||0}})}if(H.offsetWidth!==0){I()}else{o.swap(H,G,I)}return Math.max(0,Math.round(L))}return o.curCSS(H,F,J)},curCSS:function(I,F,G){var L,E=I.style;if(F=="opacity"&&!o.support.opacity){L=o.attr(E,"opacity");return L==""?"1":L}if(F.match(/float/i)){F=w}if(!G&&E&&E[F]){L=E[F]}else{if(q.getComputedStyle){if(F.match(/float/i)){F="float"}F=F.replace(/([A-Z])/g,"-$1").toLowerCase();var M=q.getComputedStyle(I,null);if(M){L=M.getPropertyValue(F)}if(F=="opacity"&&L==""){L="1"}}else{if(I.currentStyle){var J=F.replace(/\-(\w)/g,function(N,O){return O.toUpperCase()});L=I.currentStyle[F]||I.currentStyle[J];if(!/^\d+(px)?$/i.test(L)&&/^\d/.test(L)){var H=E.left,K=I.runtimeStyle.left;I.runtimeStyle.left=I.currentStyle.left;E.left=L||0;L=E.pixelLeft+"px";E.left=H;I.runtimeStyle.left=K}}}}return L},clean:function(F,K,I){K=K||document;if(typeof K.createElement==="undefined"){K=K.ownerDocument||K[0]&&K[0].ownerDocument||document}if(!I&&F.length===1&&typeof F[0]==="string"){var H=/^<(\w+)\s*\/?>$/.exec(F[0]);if(H){return[K.createElement(H[1])]}}var G=[],E=[],L=K.createElement("div");o.each(F,function(P,S){if(typeof S==="number"){S+=""}if(!S){return}if(typeof S==="string"){S=S.replace(/(<(\w+)[^>]*?)\/>/g,function(U,V,T){return T.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?U:V+"></"+T+">"});var O=S.replace(/^\s+/,"").substring(0,10).toLowerCase();var Q=!O.indexOf("<opt")&&[1,"<select 
multiple='multiple'>","</select>"]||!O.indexOf("<leg")&&[1,"<fieldset>","</fieldset>"]||O.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"<table>","</table>"]||!O.indexOf("<tr")&&[2,"<table><tbody>","</tbody></table>"]||(!O.indexOf("<td")||!O.indexOf("<th"))&&[3,"<table><tbody><tr>","</tr></tbody></table>"]||!O.indexOf("<col")&&[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"]||!o.support.htmlSerialize&&[1,"div<div>","</div>"]||[0,"",""];L.innerHTML=Q[1]+S+Q[2];while(Q[0]--){L=L.lastChild}if(!o.support.tbody){var R=/<tbody/i.test(S),N=!O.indexOf("<table")&&!R?L.firstChild&&L.firstChild.childNodes:Q[1]=="<table>"&&!R?L.childNodes:[];for(var M=N.length-1;M>=0;--M){if(o.nodeName(N[M],"tbody")&&!N[M].childNodes.length){N[M].parentNode.removeChild(N[M])}}}if(!o.support.leadingWhitespace&&/^\s/.test(S)){L.insertBefore(K.createTextNode(S.match(/^\s*/)[0]),L.firstChild)}S=o.makeArray(L.childNodes)}if(S.nodeType){G.push(S)}else{G=o.merge(G,S)}});if(I){for(var J=0;G[J];J++){if(o.nodeName(G[J],"script")&&(!G[J].type||G[J].type.toLowerCase()==="text/javascript")){E.push(G[J].parentNode?G[J].parentNode.removeChild(G[J]):G[J])}else{if(G[J].nodeType===1){G.splice.apply(G,[J+1,0].concat(o.makeArray(G[J].getElementsByTagName("script"))))}I.appendChild(G[J])}}return E}return G},attr:function(J,G,K){if(!J||J.nodeType==3||J.nodeType==8){return g}var H=!o.isXMLDoc(J),L=K!==g;G=H&&o.props[G]||G;if(J.tagName){var F=/href|src|style/.test(G);if(G=="selected"&&J.parentNode){J.parentNode.selectedIndex}if(G in J&&H&&!F){if(L){if(G=="type"&&o.nodeName(J,"input")&&J.parentNode){throw"type property can't be changed"}J[G]=K}if(o.nodeName(J,"form")&&J.getAttributeNode(G)){return J.getAttributeNode(G).nodeValue}if(G=="tabIndex"){var I=J.getAttributeNode("tabIndex");return I&&I.specified?I.value:J.nodeName.match(/(button|input|object|select|textarea)/i)?0:J.nodeName.match(/^(a|area)$/i)&&J.href?0:g}return J[G]}if(!o.support.style&&H&&G=="style"){return o.attr(J.style,"cssText",K)}if(L){J.setAttribute(G,""+K)}var E=!o.support.hrefNormalized&&H&&F?J.getAttribute(G,2):J.getAttribute(G);return E===null?g:E}if(!o.support.opacity&&G=="opacity"){if(L){J.zoom=1;J.filter=(J.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(K)+""=="NaN"?"":"alpha(opacity="+K*100+")")}return J.filter&&J.filter.indexOf("opacity=")>=0?(parseFloat(J.filter.match(/opacity=([^)]*)/)[1])/100)+"":""}G=G.replace(/-([a-z])/ig,function(M,N){return N.toUpperCase()});if(L){J[G]=K}return J[G]},trim:function(E){return(E||"").replace(/^\s+|\s+$/g,"")},makeArray:function(G){var E=[];if(G!=null){var F=G.length;if(F==null||typeof G==="string"||o.isFunction(G)||G.setInterval){E[0]=G}else{while(F){E[--F]=G[F]}}}return E},inArray:function(G,H){for(var E=0,F=H.length;E<F;E++){if(H[E]===G){return E}}return -1},merge:function(H,E){var F=0,G,I=H.length;if(!o.support.getAll){while((G=E[F++])!=null){if(G.nodeType!=8){H[I++]=G}}}else{while((G=E[F++])!=null){H[I++]=G}}return H},unique:function(K){var F=[],E={};try{for(var G=0,H=K.length;G<H;G++){var J=o.data(K[G]);if(!E[J]){E[J]=true;F.push(K[G])}}}catch(I){F=K}return F},grep:function(F,J,E){var G=[];for(var H=0,I=F.length;H<I;H++){if(!E!=!J(F[H],H)){G.push(F[H])}}return G},map:function(E,J){var F=[];for(var G=0,H=E.length;G<H;G++){var I=J(E[G],G);if(I!=null){F[F.length]=I}}return F.concat.apply([],F)}});var C=navigator.userAgent.toLowerCase();o.browser={version:(C.match(/.+(?:rv|it|ra|ie)[\/: 
]([\d.]+)/)||[0,"0"])[1],safari:/webkit/.test(C),opera:/opera/.test(C),msie:/msie/.test(C)&&!/opera/.test(C),mozilla:/mozilla/.test(C)&&!/(compatible|webkit)/.test(C)};o.each({parent:function(E){return E.parentNode},parents:function(E){return o.dir(E,"parentNode")},next:function(E){return o.nth(E,2,"nextSibling")},prev:function(E){return o.nth(E,2,"previousSibling")},nextAll:function(E){return o.dir(E,"nextSibling")},prevAll:function(E){return o.dir(E,"previousSibling")},siblings:function(E){return o.sibling(E.parentNode.firstChild,E)},children:function(E){return o.sibling(E.firstChild)},contents:function(E){return o.nodeName(E,"iframe")?E.contentDocument||E.contentWindow.document:o.makeArray(E.childNodes)}},function(E,F){o.fn[E]=function(G){var H=o.map(this,F);if(G&&typeof G=="string"){H=o.multiFilter(G,H)}return this.pushStack(o.unique(H),E,G)}});o.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(E,F){o.fn[E]=function(G){var J=[],L=o(G);for(var K=0,H=L.length;K<H;K++){var I=(K>0?this.clone(true):this).get();o.fn[F].apply(o(L[K]),I);J=J.concat(I)}return this.pushStack(J,E,G)}});o.each({removeAttr:function(E){o.attr(this,E,"");if(this.nodeType==1){this.removeAttribute(E)}},addClass:function(E){o.className.add(this,E)},removeClass:function(E){o.className.remove(this,E)},toggleClass:function(F,E){if(typeof E!=="boolean"){E=!o.className.has(this,F)}o.className[E?"add":"remove"](this,F)},remove:function(E){if(!E||o.filter(E,[this]).length){o("*",this).add([this]).each(function(){o.event.remove(this);o.removeData(this)});if(this.parentNode){this.parentNode.removeChild(this)}}},empty:function(){o(this).children().remove();while(this.firstChild){this.removeChild(this.firstChild)}}},function(E,F){o.fn[E]=function(){return this.each(F,arguments)}});function j(E,F){return E[0]&&parseInt(o.curCSS(E[0],F,true),10)||0}var h="jQuery"+e(),v=0,A={};o.extend({cache:{},data:function(F,E,G){F=F==l?A:F;var H=F[h];if(!H){H=F[h]=++v}if(E&&!o.cache[H]){o.cache[H]={}}if(G!==g){o.cache[H][E]=G}return E?o.cache[H][E]:H},removeData:function(F,E){F=F==l?A:F;var H=F[h];if(E){if(o.cache[H]){delete o.cache[H][E];E="";for(E in o.cache[H]){break}if(!E){o.removeData(F)}}}else{try{delete F[h]}catch(G){if(F.removeAttribute){F.removeAttribute(h)}}delete o.cache[H]}},queue:function(F,E,H){if(F){E=(E||"fx")+"queue";var G=o.data(F,E);if(!G||o.isArray(H)){G=o.data(F,E,o.makeArray(H))}else{if(H){G.push(H)}}}return G},dequeue:function(H,G){var E=o.queue(H,G),F=E.shift();if(!G||G==="fx"){F=E[0]}if(F!==g){F.call(H)}}});o.fn.extend({data:function(E,G){var H=E.split(".");H[1]=H[1]?"."+H[1]:"";if(G===g){var F=this.triggerHandler("getData"+H[1]+"!",[H[0]]);if(F===g&&this.length){F=o.data(this[0],E)}return F===g&&H[1]?this.data(H[0]):F}else{return this.trigger("setData"+H[1]+"!",[H[0],G]).each(function(){o.data(this,E,G)})}},removeData:function(E){return this.each(function(){o.removeData(this,E)})},queue:function(E,F){if(typeof E!=="string"){F=E;E="fx"}if(F===g){return o.queue(this[0],E)}return this.each(function(){var G=o.queue(this,E,F);if(E=="fx"&&G.length==1){G[0].call(this)}})},dequeue:function(E){return this.each(function(){o.dequeue(this,E)})}});
+/*
+ * Sizzle CSS Selector Engine - v0.9.3
+ * Copyright 2009, The Dojo Foundation
+ * Released under the MIT, BSD, and GPL Licenses.
+ * More information: http://sizzlejs.com/
+ */
+(function(){var R=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?/g,L=0,H=Object.prototype.toString;var F=function(Y,U,ab,ac){ab=ab||[];U=U||document;if(U.nodeType!==1&&U.nodeType!==9){return[]}if(!Y||typeof Y!=="string"){return ab}var Z=[],W,af,ai,T,ad,V,X=true;R.lastIndex=0;while((W=R.exec(Y))!==null){Z.push(W[1]);if(W[2]){V=RegExp.rightContext;break}}if(Z.length>1&&M.exec(Y)){if(Z.length===2&&I.relative[Z[0]]){af=J(Z[0]+Z[1],U)}else{af=I.relative[Z[0]]?[U]:F(Z.shift(),U);while(Z.length){Y=Z.shift();if(I.relative[Y]){Y+=Z.shift()}af=J(Y,af)}}}else{var ae=ac?{expr:Z.pop(),set:E(ac)}:F.find(Z.pop(),Z.length===1&&U.parentNode?U.parentNode:U,Q(U));af=F.filter(ae.expr,ae.set);if(Z.length>0){ai=E(af)}else{X=false}while(Z.length){var ah=Z.pop(),ag=ah;if(!I.relative[ah]){ah=""}else{ag=Z.pop()}if(ag==null){ag=U}I.relative[ah](ai,ag,Q(U))}}if(!ai){ai=af}if(!ai){throw"Syntax error, unrecognized expression: "+(ah||Y)}if(H.call(ai)==="[object Array]"){if(!X){ab.push.apply(ab,ai)}else{if(U.nodeType===1){for(var aa=0;ai[aa]!=null;aa++){if(ai[aa]&&(ai[aa]===true||ai[aa].nodeType===1&&K(U,ai[aa]))){ab.push(af[aa])}}}else{for(var aa=0;ai[aa]!=null;aa++){if(ai[aa]&&ai[aa].nodeType===1){ab.push(af[aa])}}}}}else{E(ai,ab)}if(V){F(V,U,ab,ac);if(G){hasDuplicate=false;ab.sort(G);if(hasDuplicate){for(var aa=1;aa<ab.length;aa++){if(ab[aa]===ab[aa-1]){ab.splice(aa--,1)}}}}}return ab};F.matches=function(T,U){return F(T,null,null,U)};F.find=function(aa,T,ab){var Z,X;if(!aa){return[]}for(var W=0,V=I.order.length;W<V;W++){var Y=I.order[W],X;if((X=I.match[Y].exec(aa))){var U=RegExp.leftContext;if(U.substr(U.length-1)!=="\\"){X[1]=(X[1]||"").replace(/\\/g,"");Z=I.find[Y](X,T,ab);if(Z!=null){aa=aa.replace(I.match[Y],"");break}}}}if(!Z){Z=T.getElementsByTagName("*")}return{set:Z,expr:aa}};F.filter=function(ad,ac,ag,W){var V=ad,ai=[],aa=ac,Y,T,Z=ac&&ac[0]&&Q(ac[0]);while(ad&&ac.length){for(var ab in I.filter){if((Y=I.match[ab].exec(ad))!=null){var U=I.filter[ab],ah,af;T=false;if(aa==ai){ai=[]}if(I.preFilter[ab]){Y=I.preFilter[ab](Y,aa,ag,ai,W,Z);if(!Y){T=ah=true}else{if(Y===true){continue}}}if(Y){for(var X=0;(af=aa[X])!=null;X++){if(af){ah=U(af,Y,X,aa);var ae=W^!!ah;if(ag&&ah!=null){if(ae){T=true}else{aa[X]=false}}else{if(ae){ai.push(af);T=true}}}}}if(ah!==g){if(!ag){aa=ai}ad=ad.replace(I.match[ab],"");if(!T){return[]}break}}}if(ad==V){if(T==null){throw"Syntax error, unrecognized expression: "+ad}else{break}}V=ad}return aa};var I=F.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF_-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF_-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF_-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF_-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*_-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF_-]|\\.)+)(?:\((['"]*)((?:\([^\)]+\)|[^\2\(\)]*)+)\2\))?/},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(T){return T.getAttribute("href")}},relative:{"+":function(aa,T,Z){var X=typeof T==="string",ab=X&&!/\W/.test(T),Y=X&&!ab;if(ab&&!Z){T=T.toUpperCase()}for(var W=0,V=aa.length,U;W<V;W++){if((U=aa[W])){while((U=U.previousSibling)&&U.nodeType!==1){}aa[W]=Y||U&&U.nodeName===T?U||false:U===T}}if(Y){F.filter(T,aa,true)}},">":function(Z,U,aa){var X=typeof U==="string";if(X&&!/\W/.test(U)){U=aa?U:U.toUpperCase();for(var V=0,T=Z.length;V<T;V++){var 
Y=Z[V];if(Y){var W=Y.parentNode;Z[V]=W.nodeName===U?W:false}}}else{for(var V=0,T=Z.length;V<T;V++){var Y=Z[V];if(Y){Z[V]=X?Y.parentNode:Y.parentNode===U}}if(X){F.filter(U,Z,true)}}},"":function(W,U,Y){var V=L++,T=S;if(!U.match(/\W/)){var X=U=Y?U:U.toUpperCase();T=P}T("parentNode",U,V,W,X,Y)},"~":function(W,U,Y){var V=L++,T=S;if(typeof U==="string"&&!U.match(/\W/)){var X=U=Y?U:U.toUpperCase();T=P}T("previousSibling",U,V,W,X,Y)}},find:{ID:function(U,V,W){if(typeof V.getElementById!=="undefined"&&!W){var T=V.getElementById(U[1]);return T?[T]:[]}},NAME:function(V,Y,Z){if(typeof Y.getElementsByName!=="undefined"){var U=[],X=Y.getElementsByName(V[1]);for(var W=0,T=X.length;W<T;W++){if(X[W].getAttribute("name")===V[1]){U.push(X[W])}}return U.length===0?null:U}},TAG:function(T,U){return U.getElementsByTagName(T[1])}},preFilter:{CLASS:function(W,U,V,T,Z,aa){W=" "+W[1].replace(/\\/g,"")+" ";if(aa){return W}for(var X=0,Y;(Y=U[X])!=null;X++){if(Y){if(Z^(Y.className&&(" "+Y.className+" ").indexOf(W)>=0)){if(!V){T.push(Y)}}else{if(V){U[X]=false}}}}return false},ID:function(T){return T[1].replace(/\\/g,"")},TAG:function(U,T){for(var V=0;T[V]===false;V++){}return T[V]&&Q(T[V])?U[1]:U[1].toUpperCase()},CHILD:function(T){if(T[1]=="nth"){var U=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(T[2]=="even"&&"2n"||T[2]=="odd"&&"2n+1"||!/\D/.test(T[2])&&"0n+"+T[2]||T[2]);T[2]=(U[1]+(U[2]||1))-0;T[3]=U[3]-0}T[0]=L++;return T},ATTR:function(X,U,V,T,Y,Z){var W=X[1].replace(/\\/g,"");if(!Z&&I.attrMap[W]){X[1]=I.attrMap[W]}if(X[2]==="~="){X[4]=" "+X[4]+" "}return X},PSEUDO:function(X,U,V,T,Y){if(X[1]==="not"){if(X[3].match(R).length>1||/^\w/.test(X[3])){X[3]=F(X[3],null,null,U)}else{var W=F.filter(X[3],U,V,true^Y);if(!V){T.push.apply(T,W)}return false}}else{if(I.match.POS.test(X[0])||I.match.CHILD.test(X[0])){return true}}return X},POS:function(T){T.unshift(true);return T}},filters:{enabled:function(T){return T.disabled===false&&T.type!=="hidden"},disabled:function(T){return T.disabled===true},checked:function(T){return T.checked===true},selected:function(T){T.parentNode.selectedIndex;return T.selected===true},parent:function(T){return !!T.firstChild},empty:function(T){return !T.firstChild},has:function(V,U,T){return !!F(T[3],V).length},header:function(T){return/h\d/i.test(T.nodeName)},text:function(T){return"text"===T.type},radio:function(T){return"radio"===T.type},checkbox:function(T){return"checkbox"===T.type},file:function(T){return"file"===T.type},password:function(T){return"password"===T.type},submit:function(T){return"submit"===T.type},image:function(T){return"image"===T.type},reset:function(T){return"reset"===T.type},button:function(T){return"button"===T.type||T.nodeName.toUpperCase()==="BUTTON"},input:function(T){return/input|select|textarea|button/i.test(T.nodeName)}},setFilters:{first:function(U,T){return T===0},last:function(V,U,T,W){return U===W.length-1},even:function(U,T){return T%2===0},odd:function(U,T){return T%2===1},lt:function(V,U,T){return U<T[3]-0},gt:function(V,U,T){return U>T[3]-0},nth:function(V,U,T){return T[3]-0==U},eq:function(V,U,T){return T[3]-0==U}},filter:{PSEUDO:function(Z,V,W,aa){var U=V[1],X=I.filters[U];if(X){return X(Z,W,V,aa)}else{if(U==="contains"){return(Z.textContent||Z.innerText||"").indexOf(V[3])>=0}else{if(U==="not"){var Y=V[3];for(var W=0,T=Y.length;W<T;W++){if(Y[W]===Z){return false}}return true}}}},CHILD:function(T,W){var Z=W[1],U=T;switch(Z){case"only":case"first":while(U=U.previousSibling){if(U.nodeType===1){return false}}if(Z=="first"){return 
true}U=T;case"last":while(U=U.nextSibling){if(U.nodeType===1){return false}}return true;case"nth":var V=W[2],ac=W[3];if(V==1&&ac==0){return true}var Y=W[0],ab=T.parentNode;if(ab&&(ab.sizcache!==Y||!T.nodeIndex)){var X=0;for(U=ab.firstChild;U;U=U.nextSibling){if(U.nodeType===1){U.nodeIndex=++X}}ab.sizcache=Y}var aa=T.nodeIndex-ac;if(V==0){return aa==0}else{return(aa%V==0&&aa/V>=0)}}},ID:function(U,T){return U.nodeType===1&&U.getAttribute("id")===T},TAG:function(U,T){return(T==="*"&&U.nodeType===1)||U.nodeName===T},CLASS:function(U,T){return(" "+(U.className||U.getAttribute("class"))+" ").indexOf(T)>-1},ATTR:function(Y,W){var V=W[1],T=I.attrHandle[V]?I.attrHandle[V](Y):Y[V]!=null?Y[V]:Y.getAttribute(V),Z=T+"",X=W[2],U=W[4];return T==null?X==="!=":X==="="?Z===U:X==="*="?Z.indexOf(U)>=0:X==="~="?(" "+Z+" ").indexOf(U)>=0:!U?Z&&T!==false:X==="!="?Z!=U:X==="^="?Z.indexOf(U)===0:X==="$="?Z.substr(Z.length-U.length)===U:X==="|="?Z===U||Z.substr(0,U.length+1)===U+"-":false},POS:function(X,U,V,Y){var T=U[2],W=I.setFilters[T];if(W){return W(X,V,U,Y)}}}};var M=I.match.POS;for(var O in I.match){I.match[O]=RegExp(I.match[O].source+/(?![^\[]*\])(?![^\(]*\))/.source)}var E=function(U,T){U=Array.prototype.slice.call(U);if(T){T.push.apply(T,U);return T}return U};try{Array.prototype.slice.call(document.documentElement.childNodes)}catch(N){E=function(X,W){var U=W||[];if(H.call(X)==="[object Array]"){Array.prototype.push.apply(U,X)}else{if(typeof X.length==="number"){for(var V=0,T=X.length;V<T;V++){U.push(X[V])}}else{for(var V=0;X[V];V++){U.push(X[V])}}}return U}}var G;if(document.documentElement.compareDocumentPosition){G=function(U,T){var V=U.compareDocumentPosition(T)&4?-1:U===T?0:1;if(V===0){hasDuplicate=true}return V}}else{if("sourceIndex" in document.documentElement){G=function(U,T){var V=U.sourceIndex-T.sourceIndex;if(V===0){hasDuplicate=true}return V}}else{if(document.createRange){G=function(W,U){var V=W.ownerDocument.createRange(),T=U.ownerDocument.createRange();V.selectNode(W);V.collapse(true);T.selectNode(U);T.collapse(true);var X=V.compareBoundaryPoints(Range.START_TO_END,T);if(X===0){hasDuplicate=true}return X}}}}(function(){var U=document.createElement("form"),V="script"+(new Date).getTime();U.innerHTML="<input name='"+V+"'/>";var T=document.documentElement;T.insertBefore(U,T.firstChild);if(!!document.getElementById(V)){I.find.ID=function(X,Y,Z){if(typeof Y.getElementById!=="undefined"&&!Z){var W=Y.getElementById(X[1]);return W?W.id===X[1]||typeof W.getAttributeNode!=="undefined"&&W.getAttributeNode("id").nodeValue===X[1]?[W]:g:[]}};I.filter.ID=function(Y,W){var X=typeof Y.getAttributeNode!=="undefined"&&Y.getAttributeNode("id");return Y.nodeType===1&&X&&X.nodeValue===W}}T.removeChild(U)})();(function(){var T=document.createElement("div");T.appendChild(document.createComment(""));if(T.getElementsByTagName("*").length>0){I.find.TAG=function(U,Y){var X=Y.getElementsByTagName(U[1]);if(U[1]==="*"){var W=[];for(var V=0;X[V];V++){if(X[V].nodeType===1){W.push(X[V])}}X=W}return X}}T.innerHTML="<a href='#'></a>";if(T.firstChild&&typeof T.firstChild.getAttribute!=="undefined"&&T.firstChild.getAttribute("href")!=="#"){I.attrHandle.href=function(U){return U.getAttribute("href",2)}}})();if(document.querySelectorAll){(function(){var T=F,U=document.createElement("div");U.innerHTML="<p class='TEST'></p>";if(U.querySelectorAll&&U.querySelectorAll(".TEST").length===0){return}F=function(Y,X,V,W){X=X||document;if(!W&&X.nodeType===9&&!Q(X)){try{return E(X.querySelectorAll(Y),V)}catch(Z){}}return 
T(Y,X,V,W)};F.find=T.find;F.filter=T.filter;F.selectors=T.selectors;F.matches=T.matches})()}if(document.getElementsByClassName&&document.documentElement.getElementsByClassName){(function(){var T=document.createElement("div");T.innerHTML="<div class='test e'></div><div class='test'></div>";if(T.getElementsByClassName("e").length===0){return}T.lastChild.className="e";if(T.getElementsByClassName("e").length===1){return}I.order.splice(1,0,"CLASS");I.find.CLASS=function(U,V,W){if(typeof V.getElementsByClassName!=="undefined"&&!W){return V.getElementsByClassName(U[1])}}})()}function P(U,Z,Y,ad,aa,ac){var ab=U=="previousSibling"&&!ac;for(var W=0,V=ad.length;W<V;W++){var T=ad[W];if(T){if(ab&&T.nodeType===1){T.sizcache=Y;T.sizset=W}T=T[U];var X=false;while(T){if(T.sizcache===Y){X=ad[T.sizset];break}if(T.nodeType===1&&!ac){T.sizcache=Y;T.sizset=W}if(T.nodeName===Z){X=T;break}T=T[U]}ad[W]=X}}}function S(U,Z,Y,ad,aa,ac){var ab=U=="previousSibling"&&!ac;for(var W=0,V=ad.length;W<V;W++){var T=ad[W];if(T){if(ab&&T.nodeType===1){T.sizcache=Y;T.sizset=W}T=T[U];var X=false;while(T){if(T.sizcache===Y){X=ad[T.sizset];break}if(T.nodeType===1){if(!ac){T.sizcache=Y;T.sizset=W}if(typeof Z!=="string"){if(T===Z){X=true;break}}else{if(F.filter(Z,[T]).length>0){X=T;break}}}T=T[U]}ad[W]=X}}}var K=document.compareDocumentPosition?function(U,T){return U.compareDocumentPosition(T)&16}:function(U,T){return U!==T&&(U.contains?U.contains(T):true)};var Q=function(T){return T.nodeType===9&&T.documentElement.nodeName!=="HTML"||!!T.ownerDocument&&Q(T.ownerDocument)};var J=function(T,aa){var W=[],X="",Y,V=aa.nodeType?[aa]:aa;while((Y=I.match.PSEUDO.exec(T))){X+=Y[0];T=T.replace(I.match.PSEUDO,"")}T=I.relative[T]?T+"*":T;for(var Z=0,U=V.length;Z<U;Z++){F(T,V[Z],W)}return F.filter(X,W)};o.find=F;o.filter=F.filter;o.expr=F.selectors;o.expr[":"]=o.expr.filters;F.selectors.filters.hidden=function(T){return T.offsetWidth===0||T.offsetHeight===0};F.selectors.filters.visible=function(T){return T.offsetWidth>0||T.offsetHeight>0};F.selectors.filters.animated=function(T){return o.grep(o.timers,function(U){return T===U.elem}).length};o.multiFilter=function(V,T,U){if(U){V=":not("+V+")"}return F.matches(V,T)};o.dir=function(V,U){var T=[],W=V[U];while(W&&W!=document){if(W.nodeType==1){T.push(W)}W=W[U]}return T};o.nth=function(X,T,V,W){T=T||1;var U=0;for(;X;X=X[V]){if(X.nodeType==1&&++U==T){break}}return X};o.sibling=function(V,U){var T=[];for(;V;V=V.nextSibling){if(V.nodeType==1&&V!=U){T.push(V)}}return T};return;l.Sizzle=F})();o.event={add:function(I,F,H,K){if(I.nodeType==3||I.nodeType==8){return}if(I.setInterval&&I!=l){I=l}if(!H.guid){H.guid=this.guid++}if(K!==g){var G=H;H=this.proxy(G);H.data=K}var E=o.data(I,"events")||o.data(I,"events",{}),J=o.data(I,"handle")||o.data(I,"handle",function(){return typeof o!=="undefined"&&!o.event.triggered?o.event.handle.apply(arguments.callee.elem,arguments):g});J.elem=I;o.each(F.split(/\s+/),function(M,N){var O=N.split(".");N=O.shift();H.type=O.slice().sort().join(".");var L=E[N];if(o.event.specialAll[N]){o.event.specialAll[N].setup.call(I,K,O)}if(!L){L=E[N]={};if(!o.event.special[N]||o.event.special[N].setup.call(I,K,O)===false){if(I.addEventListener){I.addEventListener(N,J,false)}else{if(I.attachEvent){I.attachEvent("on"+N,J)}}}}L[H.guid]=H;o.event.global[N]=true});I=null},guid:1,global:{},remove:function(K,H,J){if(K.nodeType==3||K.nodeType==8){return}var G=o.data(K,"events"),F,E;if(G){if(H===g||(typeof H==="string"&&H.charAt(0)==".")){for(var I in 
G){this.remove(K,I+(H||""))}}else{if(H.type){J=H.handler;H=H.type}o.each(H.split(/\s+/),function(M,O){var Q=O.split(".");O=Q.shift();var N=RegExp("(^|\\.)"+Q.slice().sort().join(".*\\.")+"(\\.|$)");if(G[O]){if(J){delete G[O][J.guid]}else{for(var P in G[O]){if(N.test(G[O][P].type)){delete G[O][P]}}}if(o.event.specialAll[O]){o.event.specialAll[O].teardown.call(K,Q)}for(F in G[O]){break}if(!F){if(!o.event.special[O]||o.event.special[O].teardown.call(K,Q)===false){if(K.removeEventListener){K.removeEventListener(O,o.data(K,"handle"),false)}else{if(K.detachEvent){K.detachEvent("on"+O,o.data(K,"handle"))}}}F=null;delete G[O]}}})}for(F in G){break}if(!F){var L=o.data(K,"handle");if(L){L.elem=null}o.removeData(K,"events");o.removeData(K,"handle")}}},trigger:function(I,K,H,E){var G=I.type||I;if(!E){I=typeof I==="object"?I[h]?I:o.extend(o.Event(G),I):o.Event(G);if(G.indexOf("!")>=0){I.type=G=G.slice(0,-1);I.exclusive=true}if(!H){I.stopPropagation();if(this.global[G]){o.each(o.cache,function(){if(this.events&&this.events[G]){o.event.trigger(I,K,this.handle.elem)}})}}if(!H||H.nodeType==3||H.nodeType==8){return g}I.result=g;I.target=H;K=o.makeArray(K);K.unshift(I)}I.currentTarget=H;var J=o.data(H,"handle");if(J){J.apply(H,K)}if((!H[G]||(o.nodeName(H,"a")&&G=="click"))&&H["on"+G]&&H["on"+G].apply(H,K)===false){I.result=false}if(!E&&H[G]&&!I.isDefaultPrevented()&&!(o.nodeName(H,"a")&&G=="click")){this.triggered=true;try{H[G]()}catch(L){}}this.triggered=false;if(!I.isPropagationStopped()){var F=H.parentNode||H.ownerDocument;if(F){o.event.trigger(I,K,F,true)}}},handle:function(K){var J,E;K=arguments[0]=o.event.fix(K||l.event);K.currentTarget=this;var L=K.type.split(".");K.type=L.shift();J=!L.length&&!K.exclusive;var I=RegExp("(^|\\.)"+L.slice().sort().join(".*\\.")+"(\\.|$)");E=(o.data(this,"events")||{})[K.type];for(var G in E){var H=E[G];if(J||I.test(H.type)){K.handler=H;K.data=H.data;var F=H.apply(this,arguments);if(F!==g){K.result=F;if(F===false){K.preventDefault();K.stopPropagation()}}if(K.isImmediatePropagationStopped()){break}}}},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),fix:function(H){if(H[h]){return H}var F=H;H=o.Event(F);for(var G=this.props.length,J;G;){J=this.props[--G];H[J]=F[J]}if(!H.target){H.target=H.srcElement||document}if(H.target.nodeType==3){H.target=H.target.parentNode}if(!H.relatedTarget&&H.fromElement){H.relatedTarget=H.fromElement==H.target?H.toElement:H.fromElement}if(H.pageX==null&&H.clientX!=null){var I=document.documentElement,E=document.body;H.pageX=H.clientX+(I&&I.scrollLeft||E&&E.scrollLeft||0)-(I.clientLeft||0);H.pageY=H.clientY+(I&&I.scrollTop||E&&E.scrollTop||0)-(I.clientTop||0)}if(!H.which&&((H.charCode||H.charCode===0)?H.charCode:H.keyCode)){H.which=H.charCode||H.keyCode}if(!H.metaKey&&H.ctrlKey){H.metaKey=H.ctrlKey}if(!H.which&&H.button){H.which=(H.button&1?1:(H.button&2?3:(H.button&4?2:0)))}return H},proxy:function(F,E){E=E||function(){return F.apply(this,arguments)};E.guid=F.guid=F.guid||E.guid||this.guid++;return E},special:{ready:{setup:B,teardown:function(){}}},specialAll:{live:{setup:function(E,F){o.event.add(this,F[0],c)},teardown:function(G){if(G.length){var 
E=0,F=RegExp("(^|\\.)"+G[0]+"(\\.|$)");o.each((o.data(this,"events").live||{}),function(){if(F.test(this.type)){E++}});if(E<1){o.event.remove(this,G[0],c)}}}}}};o.Event=function(E){if(!this.preventDefault){return new o.Event(E)}if(E&&E.type){this.originalEvent=E;this.type=E.type}else{this.type=E}this.timeStamp=e();this[h]=true};function k(){return false}function u(){return true}o.Event.prototype={preventDefault:function(){this.isDefaultPrevented=u;var E=this.originalEvent;if(!E){return}if(E.preventDefault){E.preventDefault()}E.returnValue=false},stopPropagation:function(){this.isPropagationStopped=u;var E=this.originalEvent;if(!E){return}if(E.stopPropagation){E.stopPropagation()}E.cancelBubble=true},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=u;this.stopPropagation()},isDefaultPrevented:k,isPropagationStopped:k,isImmediatePropagationStopped:k};var a=function(F){var E=F.relatedTarget;while(E&&E!=this){try{E=E.parentNode}catch(G){E=this}}if(E!=this){F.type=F.data;o.event.handle.apply(this,arguments)}};o.each({mouseover:"mouseenter",mouseout:"mouseleave"},function(F,E){o.event.special[E]={setup:function(){o.event.add(this,F,a,E)},teardown:function(){o.event.remove(this,F,a)}}});o.fn.extend({bind:function(F,G,E){return F=="unload"?this.one(F,G,E):this.each(function(){o.event.add(this,F,E||G,E&&G)})},one:function(G,H,F){var E=o.event.proxy(F||H,function(I){o(this).unbind(I,E);return(F||H).apply(this,arguments)});return this.each(function(){o.event.add(this,G,E,F&&H)})},unbind:function(F,E){return this.each(function(){o.event.remove(this,F,E)})},trigger:function(E,F){return this.each(function(){o.event.trigger(E,F,this)})},triggerHandler:function(E,G){if(this[0]){var F=o.Event(E);F.preventDefault();F.stopPropagation();o.event.trigger(F,G,this[0]);return F.result}},toggle:function(G){var E=arguments,F=1;while(F<E.length){o.event.proxy(G,E[F++])}return this.click(o.event.proxy(G,function(H){this.lastToggle=(this.lastToggle||0)%F;H.preventDefault();return E[this.lastToggle++].apply(this,arguments)||false}))},hover:function(E,F){return this.mouseenter(E).mouseleave(F)},ready:function(E){B();if(o.isReady){E.call(document,o)}else{o.readyList.push(E)}return this},live:function(G,F){var E=o.event.proxy(F);E.guid+=this.selector+G;o(document).bind(i(G,this.selector),this.selector,E);return this},die:function(F,E){o(document).unbind(i(F,this.selector),E?{guid:E.guid+this.selector+F}:null);return this}});function c(H){var E=RegExp("(^|\\.)"+H.type+"(\\.|$)"),G=true,F=[];o.each(o.data(this,"events").live||[],function(I,J){if(E.test(J.type)){var K=o(H.target).closest(J.data)[0];if(K){F.push({elem:K,fn:J})}}});F.sort(function(J,I){return o.data(J.elem,"closest")-o.data(I.elem,"closest")});o.each(F,function(){if(this.fn.call(this.elem,H,this.fn.data)===false){return(G=false)}});return G}function i(F,E){return["live",F,E.replace(/\./g,"`").replace(/ /g,"|")].join(".")}o.extend({isReady:false,readyList:[],ready:function(){if(!o.isReady){o.isReady=true;if(o.readyList){o.each(o.readyList,function(){this.call(document,o)});o.readyList=null}o(document).triggerHandler("ready")}}});var x=false;function 
B(){if(x){return}x=true;if(document.addEventListener){document.addEventListener("DOMContentLoaded",function(){document.removeEventListener("DOMContentLoaded",arguments.callee,false);o.ready()},false)}else{if(document.attachEvent){document.attachEvent("onreadystatechange",function(){if(document.readyState==="complete"){document.detachEvent("onreadystatechange",arguments.callee);o.ready()}});if(document.documentElement.doScroll&&l==l.top){(function(){if(o.isReady){return}try{document.documentElement.doScroll("left")}catch(E){setTimeout(arguments.callee,0);return}o.ready()})()}}}o.event.add(l,"load",o.ready)}o.each(("blur,focus,load,resize,scroll,unload,click,dblclick,mousedown,mouseup,mousemove,mouseover,mouseout,mouseenter,mouseleave,change,select,submit,keydown,keypress,keyup,error").split(","),function(F,E){o.fn[E]=function(G){return G?this.bind(E,G):this.trigger(E)}});o(l).bind("unload",function(){for(var E in o.cache){if(E!=1&&o.cache[E].handle){o.event.remove(o.cache[E].handle.elem)}}});(function(){o.support={};var F=document.documentElement,G=document.createElement("script"),K=document.createElement("div"),J="script"+(new Date).getTime();K.style.display="none";K.innerHTML=' <link/><table></table><a href="/a" style="color:red;float:left;opacity:.5;">a</a><select><option>text</option></select><object><param/></object>';var H=K.getElementsByTagName("*"),E=K.getElementsByTagName("a")[0];if(!H||!H.length||!E){return}o.support={leadingWhitespace:K.firstChild.nodeType==3,tbody:!K.getElementsByTagName("tbody").length,objectAll:!!K.getElementsByTagName("object")[0].getElementsByTagName("*").length,htmlSerialize:!!K.getElementsByTagName("link").length,style:/red/.test(E.getAttribute("style")),hrefNormalized:E.getAttribute("href")==="/a",opacity:E.style.opacity==="0.5",cssFloat:!!E.style.cssFloat,scriptEval:false,noCloneEvent:true,boxModel:null};G.type="text/javascript";try{G.appendChild(document.createTextNode("window."+J+"=1;"))}catch(I){}F.insertBefore(G,F.firstChild);if(l[J]){o.support.scriptEval=true;delete l[J]}F.removeChild(G);if(K.attachEvent&&K.fireEvent){K.attachEvent("onclick",function(){o.support.noCloneEvent=false;K.detachEvent("onclick",arguments.callee)});K.cloneNode(true).fireEvent("onclick")}o(function(){var L=document.createElement("div");L.style.width=L.style.paddingLeft="1px";document.body.appendChild(L);o.boxModel=o.support.boxModel=L.offsetWidth===2;document.body.removeChild(L).style.display="none"})})();var w=o.support.cssFloat?"cssFloat":"styleFloat";o.props={"for":"htmlFor","class":"className","float":w,cssFloat:w,styleFloat:w,readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",tabindex:"tabIndex"};o.fn.extend({_load:o.fn.load,load:function(G,J,K){if(typeof G!=="string"){return this._load(G)}var I=G.indexOf(" ");if(I>=0){var E=G.slice(I,G.length);G=G.slice(0,I)}var H="GET";if(J){if(o.isFunction(J)){K=J;J=null}else{if(typeof J==="object"){J=o.param(J);H="POST"}}}var F=this;o.ajax({url:G,type:H,dataType:"html",data:J,complete:function(M,L){if(L=="success"||L=="notmodified"){F.html(E?o("<div/>").append(M.responseText.replace(/<script(.|\s)*?\/script>/g,"")).find(E):M.responseText)}if(K){F.each(K,[M.responseText,L,M])}}});return this},serialize:function(){return o.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?o.makeArray(this.elements):this}).filter(function(){return 
this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password|search/i.test(this.type))}).map(function(E,F){var G=o(this).val();return G==null?null:o.isArray(G)?o.map(G,function(I,H){return{name:F.name,value:I}}):{name:F.name,value:G}}).get()}});o.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(E,F){o.fn[F]=function(G){return this.bind(F,G)}});var r=e();o.extend({get:function(E,G,H,F){if(o.isFunction(G)){H=G;G=null}return o.ajax({type:"GET",url:E,data:G,success:H,dataType:F})},getScript:function(E,F){return o.get(E,null,F,"script")},getJSON:function(E,F,G){return o.get(E,F,G,"json")},post:function(E,G,H,F){if(o.isFunction(G)){H=G;G={}}return o.ajax({type:"POST",url:E,data:G,success:H,dataType:F})},ajaxSetup:function(E){o.extend(o.ajaxSettings,E)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return l.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest()},accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(M){M=o.extend(true,M,o.extend(true,{},o.ajaxSettings,M));var W,F=/=\?(&|$)/g,R,V,G=M.type.toUpperCase();if(M.data&&M.processData&&typeof M.data!=="string"){M.data=o.param(M.data)}if(M.dataType=="jsonp"){if(G=="GET"){if(!M.url.match(F)){M.url+=(M.url.match(/\?/)?"&":"?")+(M.jsonp||"callback")+"=?"}}else{if(!M.data||!M.data.match(F)){M.data=(M.data?M.data+"&":"")+(M.jsonp||"callback")+"=?"}}M.dataType="json"}if(M.dataType=="json"&&(M.data&&M.data.match(F)||M.url.match(F))){W="jsonp"+r++;if(M.data){M.data=(M.data+"").replace(F,"="+W+"$1")}M.url=M.url.replace(F,"="+W+"$1");M.dataType="script";l[W]=function(X){V=X;I();L();l[W]=g;try{delete l[W]}catch(Y){}if(H){H.removeChild(T)}}}if(M.dataType=="script"&&M.cache==null){M.cache=false}if(M.cache===false&&G=="GET"){var E=e();var U=M.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+E+"$2");M.url=U+((U==M.url)?(M.url.match(/\?/)?"&":"?")+"_="+E:"")}if(M.data&&G=="GET"){M.url+=(M.url.match(/\?/)?"&":"?")+M.data;M.data=null}if(M.global&&!o.active++){o.event.trigger("ajaxStart")}var Q=/^(\w+:)?\/\/([^\/?#]+)/.exec(M.url);if(M.dataType=="script"&&G=="GET"&&Q&&(Q[1]&&Q[1]!=location.protocol||Q[2]!=location.host)){var H=document.getElementsByTagName("head")[0];var T=document.createElement("script");T.src=M.url;if(M.scriptCharset){T.charset=M.scriptCharset}if(!W){var O=false;T.onload=T.onreadystatechange=function(){if(!O&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){O=true;I();L();T.onload=T.onreadystatechange=null;H.removeChild(T)}}}H.appendChild(T);return g}var K=false;var J=M.xhr();if(M.username){J.open(G,M.url,M.async,M.username,M.password)}else{J.open(G,M.url,M.async)}try{if(M.data){J.setRequestHeader("Content-Type",M.contentType)}if(M.ifModified){J.setRequestHeader("If-Modified-Since",o.lastModified[M.url]||"Thu, 01 Jan 1970 00:00:00 GMT")}J.setRequestHeader("X-Requested-With","XMLHttpRequest");J.setRequestHeader("Accept",M.dataType&&M.accepts[M.dataType]?M.accepts[M.dataType]+", */*":M.accepts._default)}catch(S){}if(M.beforeSend&&M.beforeSend(J,M)===false){if(M.global&&!--o.active){o.event.trigger("ajaxStop")}J.abort();return false}if(M.global){o.event.trigger("ajaxSend",[J,M])}var 
N=function(X){if(J.readyState==0){if(P){clearInterval(P);P=null;if(M.global&&!--o.active){o.event.trigger("ajaxStop")}}}else{if(!K&&J&&(J.readyState==4||X=="timeout")){K=true;if(P){clearInterval(P);P=null}R=X=="timeout"?"timeout":!o.httpSuccess(J)?"error":M.ifModified&&o.httpNotModified(J,M.url)?"notmodified":"success";if(R=="success"){try{V=o.httpData(J,M.dataType,M)}catch(Z){R="parsererror"}}if(R=="success"){var Y;try{Y=J.getResponseHeader("Last-Modified")}catch(Z){}if(M.ifModified&&Y){o.lastModified[M.url]=Y}if(!W){I()}}else{o.handleError(M,J,R)}L();if(X){J.abort()}if(M.async){J=null}}}};if(M.async){var P=setInterval(N,13);if(M.timeout>0){setTimeout(function(){if(J&&!K){N("timeout")}},M.timeout)}}try{J.send(M.data)}catch(S){o.handleError(M,J,null,S)}if(!M.async){N()}function I(){if(M.success){M.success(V,R)}if(M.global){o.event.trigger("ajaxSuccess",[J,M])}}function L(){if(M.complete){M.complete(J,R)}if(M.global){o.event.trigger("ajaxComplete",[J,M])}if(M.global&&!--o.active){o.event.trigger("ajaxStop")}}return J},handleError:function(F,H,E,G){if(F.error){F.error(H,E,G)}if(F.global){o.event.trigger("ajaxError",[H,F,G])}},active:0,httpSuccess:function(F){try{return !F.status&&location.protocol=="file:"||(F.status>=200&&F.status<300)||F.status==304||F.status==1223}catch(E){}return false},httpNotModified:function(G,E){try{var H=G.getResponseHeader("Last-Modified");return G.status==304||H==o.lastModified[E]}catch(F){}return false},httpData:function(J,H,G){var F=J.getResponseHeader("content-type"),E=H=="xml"||!H&&F&&F.indexOf("xml")>=0,I=E?J.responseXML:J.responseText;if(E&&I.documentElement.tagName=="parsererror"){throw"parsererror"}if(G&&G.dataFilter){I=G.dataFilter(I,H)}if(typeof I==="string"){if(H=="script"){o.globalEval(I)}if(H=="json"){I=l["eval"]("("+I+")")}}return I},param:function(E){var G=[];function H(I,J){G[G.length]=encodeURIComponent(I)+"="+encodeURIComponent(J)}if(o.isArray(E)||E.jquery){o.each(E,function(){H(this.name,this.value)})}else{for(var F in E){if(o.isArray(E[F])){o.each(E[F],function(){H(F,this)})}else{H(F,o.isFunction(E[F])?E[F]():E[F])}}}return G.join("&").replace(/%20/g,"+")}});var m={},n,d=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];function t(F,E){var G={};o.each(d.concat.apply([],d.slice(0,E)),function(){G[this]=F});return G}o.fn.extend({show:function(J,L){if(J){return this.animate(t("show",3),J,L)}else{for(var H=0,F=this.length;H<F;H++){var E=o.data(this[H],"olddisplay");this[H].style.display=E||"";if(o.css(this[H],"display")==="none"){var G=this[H].tagName,K;if(m[G]){K=m[G]}else{var I=o("<"+G+" />").appendTo("body");K=I.css("display");if(K==="none"){K="block"}I.remove();m[G]=K}o.data(this[H],"olddisplay",K)}}for(var H=0,F=this.length;H<F;H++){this[H].style.display=o.data(this[H],"olddisplay")||""}return this}},hide:function(H,I){if(H){return this.animate(t("hide",3),H,I)}else{for(var G=0,F=this.length;G<F;G++){var E=o.data(this[G],"olddisplay");if(!E&&E!=="none"){o.data(this[G],"olddisplay",o.css(this[G],"display"))}}for(var G=0,F=this.length;G<F;G++){this[G].style.display="none"}return this}},_toggle:o.fn.toggle,toggle:function(G,F){var E=typeof G==="boolean";return o.isFunction(G)&&o.isFunction(F)?this._toggle.apply(this,arguments):G==null||E?this.each(function(){var H=E?G:o(this).is(":hidden");o(this)[H?"show":"hide"]()}):this.animate(t("toggle",3),G,F)},fadeTo:function(E,G,F){return this.animate({opacity:G},E,F)},animate:function(I,F,H,G){var 
E=o.speed(F,H,G);return this[E.queue===false?"each":"queue"](function(){var K=o.extend({},E),M,L=this.nodeType==1&&o(this).is(":hidden"),J=this;for(M in I){if(I[M]=="hide"&&L||I[M]=="show"&&!L){return K.complete.call(this)}if((M=="height"||M=="width")&&this.style){K.display=o.css(this,"display");K.overflow=this.style.overflow}}if(K.overflow!=null){this.style.overflow="hidden"}K.curAnim=o.extend({},I);o.each(I,function(O,S){var R=new o.fx(J,K,O);if(/toggle|show|hide/.test(S)){R[S=="toggle"?L?"show":"hide":S](I)}else{var Q=S.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),T=R.cur(true)||0;if(Q){var N=parseFloat(Q[2]),P=Q[3]||"px";if(P!="px"){J.style[O]=(N||1)+P;T=((N||1)/R.cur(true))*T;J.style[O]=T+P}if(Q[1]){N=((Q[1]=="-="?-1:1)*N)+T}R.custom(T,N,P)}else{R.custom(T,S,"")}}});return true})},stop:function(F,E){var G=o.timers;if(F){this.queue([])}this.each(function(){for(var H=G.length-1;H>=0;H--){if(G[H].elem==this){if(E){G[H](true)}G.splice(H,1)}}});if(!E){this.dequeue()}return this}});o.each({slideDown:t("show",1),slideUp:t("hide",1),slideToggle:t("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(E,F){o.fn[E]=function(G,H){return this.animate(F,G,H)}});o.extend({speed:function(G,H,F){var E=typeof G==="object"?G:{complete:F||!F&&H||o.isFunction(G)&&G,duration:G,easing:F&&H||H&&!o.isFunction(H)&&H};E.duration=o.fx.off?0:typeof E.duration==="number"?E.duration:o.fx.speeds[E.duration]||o.fx.speeds._default;E.old=E.complete;E.complete=function(){if(E.queue!==false){o(this).dequeue()}if(o.isFunction(E.old)){E.old.call(this)}};return E},easing:{linear:function(G,H,E,F){return E+F*G},swing:function(G,H,E,F){return((-Math.cos(G*Math.PI)/2)+0.5)*F+E}},timers:[],fx:function(F,E,G){this.options=E;this.elem=F;this.prop=G;if(!E.orig){E.orig={}}}});o.fx.prototype={update:function(){if(this.options.step){this.options.step.call(this.elem,this.now,this)}(o.fx.step[this.prop]||o.fx.step._default)(this);if((this.prop=="height"||this.prop=="width")&&this.elem.style){this.elem.style.display="block"}},cur:function(F){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null)){return this.elem[this.prop]}var E=parseFloat(o.css(this.elem,this.prop,F));return E&&E>-10000?E:parseFloat(o.curCSS(this.elem,this.prop))||0},custom:function(I,H,G){this.startTime=e();this.start=I;this.end=H;this.unit=G||this.unit||"px";this.now=this.start;this.pos=this.state=0;var E=this;function F(J){return E.step(J)}F.elem=this.elem;if(F()&&o.timers.push(F)&&!n){n=setInterval(function(){var K=o.timers;for(var J=0;J<K.length;J++){if(!K[J]()){K.splice(J--,1)}}if(!K.length){clearInterval(n);n=g}},13)}},show:function(){this.options.orig[this.prop]=o.attr(this.elem.style,this.prop);this.options.show=true;this.custom(this.prop=="width"||this.prop=="height"?1:0,this.cur());o(this.elem).show()},hide:function(){this.options.orig[this.prop]=o.attr(this.elem.style,this.prop);this.options.hide=true;this.custom(this.cur(),0)},step:function(H){var G=e();if(H||G>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var E=true;for(var F in this.options.curAnim){if(this.options.curAnim[F]!==true){E=false}}if(E){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(o.css(this.elem,"display")=="none"){this.elem.style.display="block"}}if(this.options.hide){o(this.elem).hide()}if(this.options.hide||this.options.show){for(var I in 
this.options.curAnim){o.attr(this.elem.style,I,this.options.orig[I])}}this.options.complete.call(this.elem)}return false}else{var J=G-this.startTime;this.state=J/this.options.duration;this.pos=o.easing[this.options.easing||(o.easing.swing?"swing":"linear")](this.state,J,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update()}return true}};o.extend(o.fx,{speeds:{slow:600,fast:200,_default:400},step:{opacity:function(E){o.attr(E.elem.style,"opacity",E.now)},_default:function(E){if(E.elem.style&&E.elem.style[E.prop]!=null){E.elem.style[E.prop]=E.now+E.unit}else{E.elem[E.prop]=E.now}}}});if(document.documentElement.getBoundingClientRect){o.fn.offset=function(){if(!this[0]){return{top:0,left:0}}if(this[0]===this[0].ownerDocument.body){return o.offset.bodyOffset(this[0])}var G=this[0].getBoundingClientRect(),J=this[0].ownerDocument,F=J.body,E=J.documentElement,L=E.clientTop||F.clientTop||0,K=E.clientLeft||F.clientLeft||0,I=G.top+(self.pageYOffset||o.boxModel&&E.scrollTop||F.scrollTop)-L,H=G.left+(self.pageXOffset||o.boxModel&&E.scrollLeft||F.scrollLeft)-K;return{top:I,left:H}}}else{o.fn.offset=function(){if(!this[0]){return{top:0,left:0}}if(this[0]===this[0].ownerDocument.body){return o.offset.bodyOffset(this[0])}o.offset.initialized||o.offset.initialize();var J=this[0],G=J.offsetParent,F=J,O=J.ownerDocument,M,H=O.documentElement,K=O.body,L=O.defaultView,E=L.getComputedStyle(J,null),N=J.offsetTop,I=J.offsetLeft;while((J=J.parentNode)&&J!==K&&J!==H){M=L.getComputedStyle(J,null);N-=J.scrollTop,I-=J.scrollLeft;if(J===G){N+=J.offsetTop,I+=J.offsetLeft;if(o.offset.doesNotAddBorder&&!(o.offset.doesAddBorderForTableAndCells&&/^t(able|d|h)$/i.test(J.tagName))){N+=parseInt(M.borderTopWidth,10)||0,I+=parseInt(M.borderLeftWidth,10)||0}F=G,G=J.offsetParent}if(o.offset.subtractsBorderForOverflowNotVisible&&M.overflow!=="visible"){N+=parseInt(M.borderTopWidth,10)||0,I+=parseInt(M.borderLeftWidth,10)||0}E=M}if(E.position==="relative"||E.position==="static"){N+=K.offsetTop,I+=K.offsetLeft}if(E.position==="fixed"){N+=Math.max(H.scrollTop,K.scrollTop),I+=Math.max(H.scrollLeft,K.scrollLeft)}return{top:N,left:I}}}o.offset={initialize:function(){if(this.initialized){return}var L=document.body,F=document.createElement("div"),H,G,N,I,M,E,J=L.style.marginTop,K='<div style="position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;"><div></div></div><table style="position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;" cellpadding="0" cellspacing="0"><tr><td></td></tr></table>';M={position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"};for(E in M){F.style[E]=M[E]}F.innerHTML=K;L.insertBefore(F,L.firstChild);H=F.firstChild,G=H.firstChild,I=H.nextSibling.firstChild.firstChild;this.doesNotAddBorder=(G.offsetTop!==5);this.doesAddBorderForTableAndCells=(I.offsetTop===5);H.style.overflow="hidden",H.style.position="relative";this.subtractsBorderForOverflowNotVisible=(G.offsetTop===-5);L.style.marginTop="1px";this.doesNotIncludeMarginInBodyOffset=(L.offsetTop===0);L.style.marginTop=J;L.removeChild(F);this.initialized=true},bodyOffset:function(E){o.offset.initialized||o.offset.initialize();var G=E.offsetTop,F=E.offsetLeft;if(o.offset.doesNotIncludeMarginInBodyOffset){G+=parseInt(o.curCSS(E,"marginTop",true),10)||0,F+=parseInt(o.curCSS(E,"marginLeft",true),10)||0}return{top:G,left:F}}};o.fn.extend({position:function(){var I=0,H=0,F;if(this[0]){var 
G=this.offsetParent(),J=this.offset(),E=/^body|html$/i.test(G[0].tagName)?{top:0,left:0}:G.offset();J.top-=j(this,"marginTop");J.left-=j(this,"marginLeft");E.top+=j(G,"borderTopWidth");E.left+=j(G,"borderLeftWidth");F={top:J.top-E.top,left:J.left-E.left}}return F},offsetParent:function(){var E=this[0].offsetParent||document.body;while(E&&(!/^body|html$/i.test(E.tagName)&&o.css(E,"position")=="static")){E=E.offsetParent}return o(E)}});o.each(["Left","Top"],function(F,E){var G="scroll"+E;o.fn[G]=function(H){if(!this[0]){return null}return H!==g?this.each(function(){this==l||this==document?l.scrollTo(!F?H:o(l).scrollLeft(),F?H:o(l).scrollTop()):this[G]=H}):this[0]==l||this[0]==document?self[F?"pageYOffset":"pageXOffset"]||o.boxModel&&document.documentElement[G]||document.body[G]:this[0][G]}});o.each(["Height","Width"],function(I,G){var E=I?"Left":"Top",H=I?"Right":"Bottom",F=G.toLowerCase();o.fn["inner"+G]=function(){return this[0]?o.css(this[0],F,false,"padding"):null};o.fn["outer"+G]=function(K){return this[0]?o.css(this[0],F,false,K?"margin":"border"):null};var J=G.toLowerCase();o.fn[J]=function(K){return this[0]==l?document.compatMode=="CSS1Compat"&&document.documentElement["client"+G]||document.body["client"+G]:this[0]==document?Math.max(document.documentElement["client"+G],document.body["scroll"+G],document.documentElement["scroll"+G],document.body["offset"+G],document.documentElement["offset"+G]):K===g?(this.length?o.css(this[0],J):null):this.css(J,typeof K==="string"?K:K+"px")}})})();
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_media/style.css Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,104 @@
+/* Basic layout and typography. */
+body {
+ background: #f5f5f5;
+}
+div#main-wrap {
+ width: 65em;
+ margin: 0em auto;
+ padding: 3em;
+}
+div#head-wrap {
+ text-align: center;
+ padding: 1.5em 0em .5em;
+ background-color: #ccc;
+ border-bottom: 1px solid #bbb;
+}
+div#footer {
+ color: #666;
+ text-align: center;
+ font-style: italic;
+}
+
+/* Links. */
+a {
+ text-decoration: none;
+ font-weight: bold;
+ color: #297E00;
+}
+a:hover {
+ color: #EA0076;
+}
+div#head-wrap a {
+ color: inherit;
+}
+
+/* Tables. */
+table {
+ width: 100%;
+ border: 1px solid #666;
+ background: #f2f2f2;
+}
+table td {
+ border: none;
+}
+table tr.odd {
+ background: #eee;
+}
+table tr td.last {
+ text-align: right;
+}
+
+/* Review pages. */
+div.filename-header {
+ background-color: #ccc;
+ border: 1px solid #c5c5c5;
+ padding: 1em;
+ width: 75em;
+ margin-left: -5em;
+ margin-bottom: 1.5em;
+ margin-top: 1.5em;
+}
+div.filename-header h3 {
+ margin: 0em;
+}
+
+/* Comments. */
+.comment {
+ white-space: normal;
+ background: #FBEAD0;
+ border: 1px dashed #666;
+ font-family: Consolas, Monaco, "Courier New", Courier, monospace;
+ padding: 0.75em;
+ margin-bottom: 1.5em;
+}
+.comment div.avatar {
+ border: 1px solid black;
+ float: right;
+}
+.comment div.message {
+ margin-top: 1.5em;
+ white-space: pre;
+}
+div#comment-review form {
+ margin-bottom: 3em;
+ display: none;
+}
+div#comment-file form {
+}
+
+/* Diffs. */
+div.diff {
+ overflow: auto;
+}
+div.diff table tr {
+ white-space: pre;
+}
+table tr.add {
+ background: #DBF3D1;
+}
+table tr.rem {
+ background: #FBDBDA;
+}
+div.diff .comment {
+ margin-bottom: 0em;
+}
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_templates/base.html Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,26 @@
+$def with (rd, content)
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+
+<html>
+    <head>
+        <title>${ basename(rd.target.root) } / hg-review</title>
+        <link rel="stylesheet" href="/media/aal.css" type="text/css" media="screen" />
+        <link rel="stylesheet" href="/media/style.css" type="text/css" media="screen" />
+
+        <script type="text/javascript" src="/media/jquery-1.3.2.min.js"></script>
+        <script type="text/javascript" src="/media/comments.js"></script>
+    </head>
+
+    <body>
+        <div id="head-wrap">
+            <h1><a href="/">${ basename(rd.target.root) }</a></h1>
+        </div>
+        <div id="main-wrap">
+            $:{ content }
+            <div id="footer">
+                <p>reviewing: ${ rd.target.root }</p>
+            </div>
+        </div>
+    </body>
+</html>
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_templates/index.html Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,18 @@
+$def with (rd, revs)
+
+<h2>Changesets</h2>
+<table>
+    $for ctx in revs:
+        $ ctx_node = ctx.node()
+        $ ctx_node_short = node_short(ctx_node)
+        $ ctx_comments = rd[ctx_node].comments
+        $ ctx_signoffs = rd[ctx_node].signoffs
+        <tr class="${ loop.parity }">
+            <td>${ ctx.rev() }:${ ctx_node_short }</td>
+            <td>
+                <a href="/review/${ ctx_node_short }/">${ ctx.description() }</a>
+            </td>
+            <td class="last">${ len(ctx_comments) } comments,
+                ${ len(ctx_signoffs) } signoffs</td>
+        </tr>
+</table>
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_templates/review.html Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,78 @@
+$def with (rd, rcset)
+
+$ ctx = rd.target[rcset.node]
+<h2>Changeset ${ ctx.rev() }: ${ ctx.description() }</h2>
+
+$ review_level_comments = filter(lambda c: not c.filename, rcset.comments)
+$for comment in review_level_comments:
+    <div class="comment">
+        <div class="avatar"><img height="52" width="52" src="http://www.gravatar.com/avatar/${ md5(email(comment.author)).hexdigest() }?s=52"/></div>
+        <div>
+            <div class="author"><a href="mailto:${ email(comment.author) }">${ templatefilters.person(comment.author) }</a> said:</div>
+            <div class="message">${ comment.message }</div>
+        </div>
+    </div>
+
+<div id="comment-review">
+    <p class="comment-activate"><a href="">Add a comment on this changeset</a></p>
+    <form id="comment-review-form" method="post" action="">
+        <div class="field">
+            <label for="body">Add a comment on this changeset:</label>
+            <textarea cols="60" rows="6" name="body"></textarea>
+        </div>
+        <div class="buttons">
+            <input type="submit" class="button" value="Submit" />
+        </div>
+    </form>
+</div>
+
+$for filename, diff in rcset.diffs().iteritems():
+    <div class="filename-header">
+        <h3>${ filename }</h3>
+    </div>
+
+    $ file_level_comments = filter(lambda c: c.filename == filename and not c.lines, rcset.comments)
+    $for comment in file_level_comments:
+        <div class="comment">
+            <div class="avatar"><img height="52" width="52" src="http://www.gravatar.com/avatar/${ md5(email(comment.author)).hexdigest() }?s=52"/></div>
+            <div>
+                <div class="author"><a href="mailto:${ email(comment.author) }">${ templatefilters.person(comment.author) }</a> said:</div>
+                <div class="message">${ comment.message }</div>
+            </div>
+        </div>
+
+    <div id="comment-file">
+        <p class="comment-activate"><a href="">Add a comment on this file</a></p>
+        <form id="comment-file-form" method="post" action="">
+            <div class="field">
+                <label for="body">Add a comment on this file:</label>
+                <textarea cols="60" rows="6" name="body"></textarea>
+            </div>
+            <div class="buttons">
+                <input type="submit" class="button" value="Submit" />
+            </div>
+            <input type="hidden" name="filename" value="${ filename }" />
+        </form>
+    </div>
+
+    <div class="diff">
+        <table>
+            $ max_line = diff['max']
+            $ content = diff['content']
+            $ line_level_comments = filter(lambda c: c.filename == filename and c.lines, rcset.comments)
+            $for n, line in content:
+                $ kind = 'rem' if line[0] == '-' else 'add' if line[0] == '+' else ''
+                <tr class="${ kind }">
+                    <td class="diff-line"><code>${ line[1:] or ' ' }</code></td>
+                </tr>
+                $ line_comments = filter(lambda c: max(c.lines) == n, line_level_comments)
+                $for comment in line_comments:
+                    <tr><td class="comment">
+                        <div class="avatar"><img height="52" width="52" src="http://www.gravatar.com/avatar/${ md5(email(comment.author)).hexdigest() }?s=52"/></div>
+                        <div>
+                            <div class="author"><a href="mailto:${ email(comment.author) }">${ templatefilters.person(comment.author) }</a> said:</div>
+                            <div class="message">${ comment.message }</div>
+                        </div>
+                    </td></tr>
+        </table>
+    </div>
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/review/web_ui.py Wed Oct 21 19:32:14 2009 -0400
@@ -0,0 +1,100 @@
+"""The review extension's web UI."""
+
+import sys, os
+import api
+from mercurial import cmdutil
+
+package_path = os.path.split(os.path.realpath(__file__))[0]
+template_path = os.path.join(package_path, 'web_templates')
+media_path = os.path.join(package_path, 'web_media')
+top_path = os.path.split(package_path)[0]
+bundled_path = os.path.join(top_path, 'bundled')
+webpy_path = os.path.join(bundled_path, 'webpy')
+
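+# Put the bundled copy of web.py at the front of sys.path so the
+# "import web" below always picks it up.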
+sys.path.insert(0, webpy_path)
+import web
+
+
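+# _rd is filled in by load_interface(); the handler classes read it as a
+# module global because web.py instantiates them itself.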
+_rd = None
+urls = (
+    '/', 'index',
+    '/media/([^/]*)', 'media',
+    '/review/([\da-f]{12})/?', 'review',
+)
+
+
+from mercurial.node import short
+from mercurial.util import email
+from mercurial import templatefilters
+from hashlib import md5
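+# Helpers exposed to the templetor templates as globals.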
+g = {
+    'node_short': short,
+    'basename': os.path.basename,
+    'md5': md5,
+    'email': email,
+    'templatefilters': templatefilters,
+}
+render = web.template.render(template_path, globals=g)
+
+LOG_PAGE_LEN = 25
+
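+# Decorator: wrap a handler's rendered content in the base.html template.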
+def render_in_base(fn):
+    def _fn(*args, **kwargs):
+        content = fn(*args, **kwargs)
+        return render.base(_rd, content)
+    return _fn
+
+class index:
+    @render_in_base
+    def GET(self):
+        rev_max = _rd.target['tip'].rev()
+        # Show the most recent LOG_PAGE_LEN changesets, down to and including
+        # revision 0 when the repository has fewer than that.
+        rev_min = rev_max - LOG_PAGE_LEN + 1 if rev_max >= LOG_PAGE_LEN else 0
+        revs = (_rd.target[r] for r in xrange(rev_max, rev_min - 1, -1))
+        return render.index(_rd, revs)
+
+
+class review:
+    @render_in_base
+    def GET(self, node_short):
+        return render.review(_rd, _rd[node_short])
+
+    def POST(self, node_short):
+        i = web.input()
+        body = i['body']
+        filename = i['filename'] if 'filename' in i else ''
+
+        if body:
+            rcset = _rd[node_short]
+            rcset.add_comment(body, filename)
+
+        raise web.seeother('/review/%s/' % node_short)
+
+
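+# Serves files from web_media/; rejecting paths containing '..' is a minimal
+# guard against directory traversal.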
+class media:
+    def GET(self, fname):
+        if '..' in fname:
+            return ''
+        else:
+            with open(os.path.join(media_path, fname)) as f:
+                content = f.read()
+            return content
+
+
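+# Build the shared ReviewDatastore and hand control to web.py's built-in server.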
+def load_interface(ui, repo):
+    global _rd
+    _rd = api.ReviewDatastore(ui, repo)
+
+    sys.argv = sys.argv[:1] # Seriously, web.py? This is such a hack.
+    app = web.application(urls, globals())
+    app.run()