b2bd9c232faa flask

Bundle Werkzeug.
author Steve Losh <steve@stevelosh.com>
date Fri, 11 Jun 2010 20:14:01 -0400
parents 46b413229d3d
children 9a463602589d
branches/tags flask
files bundled/werkzeug/.hgignore bundled/werkzeug/.hgtags bundled/werkzeug/AUTHORS bundled/werkzeug/CHANGES bundled/werkzeug/LICENSE bundled/werkzeug/MANIFEST.in bundled/werkzeug/Makefile bundled/werkzeug/setup.cfg bundled/werkzeug/setup.py bundled/werkzeug/werkzeug/__init__.py bundled/werkzeug/werkzeug/_internal.py bundled/werkzeug/werkzeug/contrib/__init__.py bundled/werkzeug/werkzeug/contrib/atom.py bundled/werkzeug/werkzeug/contrib/cache.py bundled/werkzeug/werkzeug/contrib/fixers.py bundled/werkzeug/werkzeug/contrib/iterio.py bundled/werkzeug/werkzeug/contrib/jsrouting.py bundled/werkzeug/werkzeug/contrib/kickstart.py bundled/werkzeug/werkzeug/contrib/limiter.py bundled/werkzeug/werkzeug/contrib/lint.py bundled/werkzeug/werkzeug/contrib/profiler.py bundled/werkzeug/werkzeug/contrib/securecookie.py bundled/werkzeug/werkzeug/contrib/sessions.py bundled/werkzeug/werkzeug/contrib/testtools.py bundled/werkzeug/werkzeug/contrib/wrappers.py bundled/werkzeug/werkzeug/datastructures.py bundled/werkzeug/werkzeug/debug/__init__.py bundled/werkzeug/werkzeug/debug/console.py bundled/werkzeug/werkzeug/debug/render.py bundled/werkzeug/werkzeug/debug/repr.py bundled/werkzeug/werkzeug/debug/shared/body.tmpl bundled/werkzeug/werkzeug/debug/shared/codetable.tmpl bundled/werkzeug/werkzeug/debug/shared/console.png bundled/werkzeug/werkzeug/debug/shared/debugger.js bundled/werkzeug/werkzeug/debug/shared/jquery.js bundled/werkzeug/werkzeug/debug/shared/less.png bundled/werkzeug/werkzeug/debug/shared/more.png bundled/werkzeug/werkzeug/debug/shared/source.png bundled/werkzeug/werkzeug/debug/shared/style.css bundled/werkzeug/werkzeug/debug/shared/vartable.tmpl bundled/werkzeug/werkzeug/debug/tbtools.py bundled/werkzeug/werkzeug/debug/templates/console.html bundled/werkzeug/werkzeug/debug/templates/dump_object.html bundled/werkzeug/werkzeug/debug/templates/frame.html bundled/werkzeug/werkzeug/debug/templates/help_command.html bundled/werkzeug/werkzeug/debug/templates/source.html bundled/werkzeug/werkzeug/debug/templates/traceback_full.html bundled/werkzeug/werkzeug/debug/templates/traceback_plaintext.html bundled/werkzeug/werkzeug/debug/templates/traceback_summary.html bundled/werkzeug/werkzeug/debug/utils.py bundled/werkzeug/werkzeug/exceptions.py bundled/werkzeug/werkzeug/formparser.py bundled/werkzeug/werkzeug/http.py bundled/werkzeug/werkzeug/local.py bundled/werkzeug/werkzeug/posixemulation.py bundled/werkzeug/werkzeug/routing.py bundled/werkzeug/werkzeug/script.py bundled/werkzeug/werkzeug/security.py bundled/werkzeug/werkzeug/serving.py bundled/werkzeug/werkzeug/templates.py bundled/werkzeug/werkzeug/test.py bundled/werkzeug/werkzeug/testapp.py bundled/werkzeug/werkzeug/urls.py bundled/werkzeug/werkzeug/useragents.py bundled/werkzeug/werkzeug/utils.py bundled/werkzeug/werkzeug/wrappers.py bundled/werkzeug/werkzeug/wsgi.py

Changes

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/.hgignore	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,10 @@
+^MANIFEST$
+^(build|dist|Werkzeug\.egg-info)/
+\.py[co]$
+\.DS_Store$
+^env$
+^docs/_build
+[^/]+-stats\.txt$
+^bench/[ab]/
+\.coverage
+coverage_out/
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/.hgtags	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,11 @@
+b4548e55e0257c6c777a0484fbc86a8b82090cd0 0.1
+f93fb9db6fce63c32849e7369ae0b16e137a5b2b 0.2
+40e894d190577011b989051e7104fc4444a72ef4 0.3
+7bcec7da720c22ba07153b832fb6657d97d4b2d6 0.3.1
+0c47d98e422773bebce294d9f5ce74dc189f1952 0.4
+a93240c851ab4ac3c9426f61b69391586d3d7fca 0.4.1
+426d64c7ef3e605eb7551980b3b9de3bb0001dab 0.5
+f972dac514934c6b993c5d5813a03da15bda6a46 0.5.1
+f0716ad49661076e0f129b9ee4e5f7118a25e730 0.6
+a32bf2b4175616347b2493ff58aebbc5e4392901 0.6.1
+3aa163672076c95e0e36e65cfbb28d79bf8368ef 0.6.2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/AUTHORS	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,42 @@
+Werkzeug is written and maintained by the Werkzeug Team and various
+contributors:
+
+Project Leader / Developer:
+
+- Armin Ronacher <armin.ronacher@active-4.com>
+
+- Georg Brandl
+- Leif K-Brooks <eurleif@gmail.com>
+- Thomas Johansson
+- Marian Sigler
+- Ronny Pfannschmidt
+- Noah Slater <nslater@tumbolia.org>
+- Alec Thomas
+- Shannon Behrens
+- Christoph Rauch
+- Clemens Hermann
+- Jason Kirtland
+- Ali Afshar
+- Christopher Grebs <cg@webshox.org>
+- Sean Cazzell <seancazzell@gmail.com>
+- Florent Xicluna
+
+Contributors of code for werkzeug/examples are:
+
+- Itay Neeman <itay@neeman.net>
+
+The SSL related parts of the Werkzeug development server are partially
+taken from Paste.  The original code is MIT licensed which is largely
+compatible with the modified BSD license.  The following copyrights apply:
+
+- (c) 2005 Ian Bicking and contributors
+- (c) 2005 Clark C. Evans
+
+The rename() function from the posixemulation was taken almost unmodified
+from the Trac project's utility module.  The original code is BSD licensed
+with the following copyrights from that module:
+
+- (c) 2003-2009 Edgewall Software
+- (c) 2003-2006 Jonas Borgström <jonas@edgewall.com>
+- (c) 2006 Matthew Good <trac@matt-good.net>
+- (c) 2005-2006 Christian Boos <cboos@neuf.fr>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/CHANGES	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,371 @@
+Werkzeug Changelog
+==================
+
+Version 1.0
+-----------
+(first 1.0 release, release date to be announced)
+
+- improved :func:`url_decode` and :func:`url_encode` performance.
+- fixed an issue where the SharedDataMiddleware could cause an
+  internal server error on weird paths when loading via pkg_resources.
+- fixed a URL generation bug that caused URLs to be invalid if a
+  generated component contained a colon.
+- :func:`werkzeug.import_string` now works with partially set up
+  packages properly.
+
+Version 0.6.2
+-------------
+(bugfix release, released on April 23rd 2010)
+
+- renamed the `implicit_seqence_conversion` attribute of the
+  request object to `implicit_sequence_conversion`.
+
+Version 0.6.1
+-------------
+(bugfix release, released on April 13th 2010)
+
+- heavily improved local objects.  Should pick up standalone greenlet
+  builds now and support proxies to free callables as well.  There is
+  also a stacked local now that makes it possible to invoke the same
+  application from within itself by pushing current request/response
+  on top of the stack.
+- routing build method will also build non-default method rules properly
+  if no method is provided.
+- added proper IPv6 support for the builtin server.
+- windows specific filesystem session store fixes.
+  (should now be more stable under high concurrency)
+- fixed a `NameError` in the session system.
+- fixed a bug with empty arguments in the werkzeug.script system.
+- fixed a bug where log lines will be duplicated if an application uses
+  :meth:`logging.basicConfig` (#499)
+- added secure password hashing and checking functions.
+- `HEAD` is now implicitly added as method in the routing system if
+  `GET` is present.  Not doing that was considered a bug because often
+  code assumed that this is the case and in web servers that do not
+  normalize `HEAD` to `GET` this could break `HEAD` requests.
+- the script support can start SSL servers now.
+
+Version 0.6
+-----------
+(codename Hammer, released on Feb 19th 2010)
+
+- removed pending deprecations
+- sys.path is now printed from the testapp.
+- fixed an RFC 2068 incompatibility with cookie value quoting.
+- the :class:`FileStorage` now gives access to the multipart headers.
+- `cached_property.writeable` has been deprecated.
+- :meth:`MapAdapter.match` now accepts a `return_rule` keyword argument
+  that returns the matched `Rule` instead of just the `endpoint`
+- :meth:`routing.Map.bind_to_environ` now raises a more accurate error
+  message if the map was bound to an invalid WSGI environment.
+- added support for SSL to the builtin development server.
+- Response objects are no longer modified in place when they are evaluated
+  as WSGI applications.  For backwards compatibility the `fix_headers`
+  function is still called in case it was overridden.
+  You should however change your application to use `get_wsgi_headers` if
+  you need header modifications before responses are sent as the backwards
+  compatibility support will go away in future versions.
+- :func:`append_slash_redirect` no longer requires the QUERY_STRING to be
+  in the WSGI environment.
+- added :class:`~werkzeug.contrib.wrappers.DynamicCharsetResponseMixin`
+- added :class:`~werkzeug.contrib.wrappers.DynamicCharsetRequestMixin`
+- added :attr:`BaseRequest.url_charset`
+- request and response objects have a default `__repr__` now.
+- builtin data structures can be pickled now.
+- the form data parser will now look at the filename instead of the
+  content type to figure out if it should treat the upload as regular
+  form data or file upload.  This fixes a bug with Google Chrome.
+- improved performance of `make_line_iter` and the multipart parser
+  for binary uploads.
+- fixed :attr:`~werkzeug.BaseResponse.is_streamed`
+- fixed a path quoting bug in `EnvironBuilder` that caused PATH_INFO and
+  SCRIPT_NAME to end up in the environ unquoted.
+- :meth:`werkzeug.BaseResponse.freeze` now sets the content length.
+- for unknown HTTP methods the request stream is now always limited
+  instead of being empty.  This makes it easier to implement DAV
+  and other protocols on top of Werkzeug.
+- added :meth:`werkzeug.MIMEAccept.best_match`
+- multi-value test-client posts from a standard dictionary are now
+  supported.  Previously you had to use a multi dict.
+- rule templates properly work with submounts, subdomains and
+  other rule factories now.
+- deprecated non-silent usage of the :class:`werkzeug.LimitedStream`.
+- added support for IRI handling to many parts of Werkzeug.
+- development server properly logs to the werkzeug logger now.
+- added :func:`werkzeug.extract_path_info`
+- fixed a querystring quoting bug in :func:`url_fix`
+- added `fallback_mimetype` to :class:`werkzeug.SharedDataMiddleware`.
+- deprecated :meth:`BaseResponse.iter_encoded`'s charset parameter.
+- added :meth:`BaseResponse.make_sequence`,
+  :attr:`BaseResponse.is_sequence` and
+  :meth:`BaseResponse._ensure_sequence`.
+- added better __repr__ of :class:`werkzeug.Map`
+- `import_string` accepts unicode strings as well now.
+- development server doesn't break on double slashes after the host name.
+- better `__repr__` and `__str__` of
+  :exc:`werkzeug.exceptions.HTTPException`
+- test client works correctly with multiple cookies now.
+- the :class:`werkzeug.routing.Map` now has a class attribute with
+  the default converter mapping.  This helps subclasses to override
+  the converters without passing them to the constructor.
+- implemented :class:`OrderedMultiDict`
+- improved the session support for more efficient session storing
+  on the filesystem.  Also added support for listing of sessions
+  currently stored in the filesystem session store.
+- werkzeug no longer utilizes the Python time module for parsing
+  which means that dates in a broader range can be parsed.
+- the wrappers now have class attributes that make it possible to
+  swap out the dict and list types they use.
+- werkzeug debugger should work on the appengine dev server now.
+- the URL builder supports dropping of unexpected arguments now.
+  Previously they were always appended to the URL as query string.
+- profiler now writes to the correct stream.
+
+Version 0.5.1
+-------------
+(bugfix release for 0.5, released on July 9th 2009)
+
+- fixed boolean check of :class:`FileStorage`
+- url routing system properly supports unicode URL rules now.
+- file upload streams no longer have to provide a truncate()
+  method.
+- implemented :meth:`BaseRequest._form_parsing_failed`.
+- fixed #394 
+- :meth:`ImmutableDict.copy`, :meth:`ImmutableMultiDict.copy` and
+  :meth:`ImmutableTypeConversionDict.copy` return mutable shallow
+  copies.
+- fixed a bug with the `make_runserver` script action.
+- :meth:`MultiDict.items` and :meth:`MultiDict.iteritems` now accept an
+  argument to return a pair for each value of each key.
+- the multipart parser now works better with hand-crafted multipart
+  requests that have extra newlines added.  This fixes a bug
+  with setuptools uploads not being handled properly (#390)
+- fixed some minor bugs in the atom feed generator.
+- fixed a bug with client cookie header parsing being case sensitive.
+- fixed a not-working deprecation warning.
+- fixed package loading for :class:`SharedDataMiddleware`.
+- fixed a bug in the secure cookie that made server-side expiration
+  impossible on servers whose local time was not set to UTC.
+- fixed console of the interactive debugger.
+
+
+Version 0.5
+-----------
+(codename Schlagbohrer, released on April 24th 2009)
+
+- requires Python 2.4 now
+- fixed a bug in :class:`~contrib.IterIO`
+- added :class:`MIMEAccept` and :class:`CharsetAccept` that work like the
+  regular :class:`Accept` but have extra special normalization for mimetypes
+  and charsets and extra convenience methods.
+- switched the serving system from wsgiref to something homebrew.
+- the :class:`Client` now supports cookies.
+- added the :mod:`~werkzeug.contrib.fixers` module with various
+  fixes for webserver bugs and hosting setup side-effects.
+- added :mod:`werkzeug.contrib.wrappers`
+- added :func:`is_hop_by_hop_header`
+- added :func:`is_entity_header`
+- added :func:`remove_hop_by_hop_headers`
+- added :func:`pop_path_info`
+- added :func:`peek_path_info`
+- added :func:`wrap_file` and :class:`FileWrapper`
+- moved `LimitedStream` from the contrib package into the regular
+  werkzeug one and changed the default behavior to raise exceptions
+  rather than stopping without warning.  The old class will stick in
+  the module until 0.6.
+- implemented experimental multipart parser that replaces the old CGI hack.
+- added :func:`dump_options_header` and :func:`parse_options_header`
+- added :func:`quote_header_value` and :func:`unquote_header_value`
+- :func:`url_encode` and :func:`url_decode` now accept a separator
+  argument to switch between `&` and `;` as pair separator.  The magic
+  switch is no longer in place.
+- all form data parsing functions as well as the :class:`BaseRequest`
+  object have parameters (or attributes) to limit the number of
+  incoming bytes (either totally or per field).
+- added :class:`LanguageAccept`
+- request objects are now enforced to be read only for all collections.
+- added many new collection classes, refactored collections in general.
+- test support was refactored, semi-undocumented `werkzeug.test.File`
+  was replaced by :class:`werkzeug.FileStorage`.
+- :class:`EnvironBuilder` was added and unifies the previous distinct
+  :func:`create_environ`, :class:`Client` and
+  :meth:`BaseRequest.from_values`.  They all work the same now which
+  is less confusing.
+- officially documented imports from the internal modules as undefined
+  behavior.  These modules were never exposed as public interfaces.
+- removed `FileStorage.__len__` which previously made the object
+  falsy for browsers not sending the content length which all browsers
+  do.
+- :class:`SharedDataMiddleware` uses `wrap_file` now and has a
+  configurable cache timeout.
+- added :class:`CommonRequestDescriptorsMixin`
+- added :attr:`CommonResponseDescriptorsMixin.mimetype_params`
+- added :mod:`werkzeug.contrib.lint`
+- added `passthrough_errors` to `run_simple`.
+- added `secure_filename`
+- added :func:`make_line_iter` 
+- :class:`MultiDict` copies now instead of revealing internal
+  lists to the caller for `getlist` and iteration functions that
+  return lists.
+- added :attr:`follow_redirect` to the :func:`open` of :class:`Client`.
+- added support for `extra_files` in
+  :func:`~werkzeug.script.make_runserver`
+
+Version 0.4.1
+-------------
+(Bugfix release, released on January 11th 2009)
+
+- `werkzeug.contrib.cache.Memcached` now accepts objects that
+  implement the memcache.Client interface as an alternative to a list
+  of strings with server addresses.
+  There is also now a `GAEMemcachedCache` that connects to the Google
+  appengine cache.
+- explicitly convert secret keys to bytestrings now because Python
+  2.6 no longer does that.
+- `url_encode` and all interfaces that call it now support ordering of
+  options, though this is disabled by default.
+- the development server no longer resolves the addresses of clients.
+- Fixed a typo in `werkzeug.test` that broke `File`.
+- `Map.bind_to_environ` uses the `Host` header now if available.
+- Fixed `BaseCache.get_dict` (#345)
+- `werkzeug.test.Client` can now run the application buffered in which
+  case the application is properly closed automatically.
+- Fixed `Headers.set` (#354).  Caused header duplication before.
+- Fixed `Headers.pop` (#349).  default parameter was not properly
+  handled.
+- Fixed UnboundLocalError in `create_environ` (#351)
+- `Headers` is more compatible with wsgiref now.
+- `Template.render` accepts multidicts now.
+- dropped support for Python 2.3
+
+Version 0.4
+-----------
+(codename Schraubenzieher, released on November 23rd 2008)
+
+- `Client` supports an empty `data` argument now.
+- fixed a bug in `Response.application` that made it impossible to use it
+  as method decorator.
+- the session system should work on appengine now
+- the secure cookie works properly in load balanced environments with
+  different cpu architectures now.
+- `CacheControl.no_cache` and `CacheControl.private` behavior changed to
+  reflect the possibilities of the HTTP RFC.  Setting these attributes to
+  `None` or `True` now sets the value to "the empty value".
+  More details in the documentation.
+- fixed `werkzeug.contrib.atom.AtomFeed.__call__`. (#338)
+- `BaseResponse.make_conditional` now always returns `self`.  Previously
+  it didn't for post requests and such.
+- fixed a bug in boolean attribute handling of `html` and `xhtml`.
+- added graceful error handling to the debugger pastebin feature.
+- added a more list like interface to `Headers` (slicing and indexing
+  works now)
+- fixed a bug with the `__setitem__` method of `Headers` that didn't
+  properly remove all keys on replacing.
+- added `remove_entity_headers` which removes all entity headers from
+  a list of headers (or a `Headers` object)
+- the responses now automatically call `remove_entity_headers` if the
+  status code is 304.
+- fixed a bug with `Href` query parameter handling.  Previously the last
+  item of a call to `Href` was not handled properly if it was a dict.
+- headers now support a `pop` operation to better work with environ
+  properties.
+
+
+Version 0.3.1
+-------------
+(released on June 24th 2008)
+
+- fixed a security problem with `werkzeug.contrib.SecureCookie`.
+  More details available in the `release announcement`_.
+
+.. _release announcement: http://lucumr.pocoo.org/cogitations/2008/06/24/werkzeug-031-released/
+
+Version 0.3
+-----------
+(codename EUR325CAT6, released on June 14th 2008)
+
+- added support for redirecting in url routing.
+- added `Authorization` and `AuthorizationMixin`
+- added `WWWAuthenticate` and `WWWAuthenticateMixin`
+- added `parse_list_header`
+- added `parse_dict_header`
+- added `parse_authorization_header`
+- added `parse_www_authenticate_header`
+- added `_get_current_object` method to `LocalProxy` objects
+- added `parse_form_data`
+- `MultiDict`, `CombinedMultiDict`, `Headers`, and `EnvironHeaders` raise
+  special key errors now that are subclasses of `BadRequest` so if you
+  don't catch them they give meaningful HTTP responses.
+- added support for alternative encoding error handling and the new
+  `HTTPUnicodeError` which (if not caught) behaves like a `BadRequest`.
+- added `BadRequest.wrap`.
+- added ETag support to the SharedDataMiddleware and added an option
+  to disable caching.
+- fixed `is_xhr` on the request objects.
+- fixed error handling of the url adapter's `dispatch` method. (#318)
+- fixed bug with `SharedDataMiddleware`.
+- fixed `Accept.values`.
+- `EnvironHeaders` contain content-type and content-length now
+- `url_encode` treats lists and tuples in dicts passed to it as multiple
+  values for the same key so that one doesn't have to pass a `MultiDict`
+  to the function.
+- added `validate_arguments`
+- added `BaseRequest.application`
+- improved Python 2.3 support
+- `run_simple` accepts `use_debugger` and `use_evalex` parameters now,
+  like the `make_runserver` factory function from the script module.
+- the `environ_property` is now read-only by default
+- it's now possible to initialize requests as "shallow" requests which
+  causes runtime errors if the request object tries to consume the
+  input stream.
+
+
+Version 0.2
+-----------
+(codename Faustkeil, released Feb 14th 2008)
+
+- Added `AnyConverter` to the routing system.
+- Added `werkzeug.contrib.securecookie`
+- Exceptions have a ``get_response()`` method that returns a response object
+- fixed the path ordering bug (#293), thanks Thomas Johansson
+- `BaseReporterStream` is now part of the werkzeug contrib module.  From
+  Werkzeug 0.3 onwards you will have to import it from there.
+- added `DispatcherMiddleware`.
+- `RequestRedirect` is now a subclass of `HTTPException` and uses a
+  301 status code instead of 302.
+- `url_encode` and `url_decode` can optionally treat keys as unicode strings
+  now, too.
+- `werkzeug.script` has a different caller format for boolean arguments now.
+- renamed `lazy_property` to `cached_property`.
+- added `import_string`.
+- added is_* properties to request objects.
+- added `empty()` method to routing rules.
+- added `werkzeug.contrib.profiler`.
+- added `extends` to `Headers`.
+- added `dump_cookie` and `parse_cookie`.
+- added `as_tuple` to the `Client`.
+- added `werkzeug.contrib.testtools`.
+- added `werkzeug.unescape`
+- added `BaseResponse.freeze`
+- added `werkzeug.contrib.atom`
+- the HTTPExceptions accept an argument `description` now which overrides the
+  default description.
+- the `MapAdapter` has a default for path info now.  If you use
+  `bind_to_environ` you don't have to pass the path later.
+- the wsgiref subclass werkzeug uses for the dev server does not use direct
+  sys.stderr logging any more but a logger called "werkzeug".
+- implemented `Href`.
+- implemented `find_modules`
+- refactored request and response objects into base objects, mixins and
+  full featured subclasses that implement all mixins.
+- added simple user agent parser
+- werkzeug's routing raises `MethodNotAllowed` now if it matches a
+  rule but for a different method.
+- many fixes and small improvements
+
+
+Version 0.1
+-----------
+(codename Wictorinoxger, released Dec 9th 2007)
+
+- Initial release
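
The 0.6.1 entry above mentions new secure password hashing and checking
functions.  A minimal usage sketch against the bundled `werkzeug.security`
module (the function names come from the export table later in this
changeset; the password strings and variable names are placeholders)::

    from werkzeug.security import generate_password_hash, check_password_hash

    # store only the hash, never the plain-text password
    pw_hash = generate_password_hash('default-password')

    # check_password_hash() returns True only for the matching password
    assert check_password_hash(pw_hash, 'default-password')
    assert not check_password_hash(pw_hash, 'wrong-password')
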
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/LICENSE	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,29 @@
+Copyright (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+    * The names of the contributors may not be used to endorse or
+      promote products derived from this software without specific
+      prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/MANIFEST.in	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,8 @@
+include Makefile CHANGES LICENSE AUTHORS
+recursive-include werkzeug/debug/shared *
+recursive-include werkzeug/debug/templates *
+recursive-include tests *
+recursive-include docs *
+recursive-include examples *
+recursive-include artwork *
+prune docs/_build/doctrees
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/Makefile	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,29 @@
+#
+# Werkzeug Makefile
+# ~~~~~~~~~~~~~~~~~
+#
+# Shortcuts for various tasks.
+#
+# :copyright: (c) 2008 by the Werkzeug Team, see AUTHORS for more details.
+# :license: BSD, see LICENSE for more details.
+#
+
+TESTS = \
+	tests \
+	tests/contrib
+
+TEST_OPTIONS = \
+	-v \
+	-e '^test_app$$' #skip the test_app application object which is not a test
+
+documentation:
+	@(cd docs; make html)
+
+test:
+	@(nosetests $(TEST_OPTIONS) $(TESTS))
+
+coverage:
+	@(nosetests $(TEST_OPTIONS) --with-coverage --cover-package=werkzeug --cover-html --cover-html-dir=coverage_out $(TESTS))
+
+doctest:
+	@(cd docs; sphinx-build -b doctest . _build/doctest)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/setup.cfg	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,6 @@
+[egg_info]
+tag_build = dev
+tag_date = true
+
+[aliases]
+release = egg_info -RDb ''
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/setup.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+"""
+Werkzeug
+========
+
+Werkzeug started as a simple collection of various utilities for WSGI
+applications and has become one of the most advanced WSGI utility
+modules.  It includes a powerful debugger, full featured request and
+response objects, HTTP utilities to handle entity tags, cache control
+headers, HTTP dates, cookie handling, file uploads, a powerful URL
+routing system and a bunch of community contributed addon modules.
+
+Werkzeug is unicode aware and doesn't enforce a specific template
+engine, database adapter or anything else.  It doesn't even enforce
+a specific way of handling requests and leaves all that up to the
+developer. It's most useful for end user applications which should work
+on as many server environments as possible (such as blogs, wikis,
+bulletin boards, etc.).
+
+Details and example applications are available on the
+`Werkzeug website <http://werkzeug.pocoo.org/>`_.
+
+
+Features
+--------
+
+-   unicode awareness
+
+-   request and response objects
+
+-   various utility functions for dealing with HTTP headers such as
+    `Accept` and `Cache-Control`.
+
+-   thread local objects with proper cleanup at request end
+
+-   an interactive debugger
+
+-   a simple WSGI server with support for threading and forking
+    with an automatic reloader.
+
+-   a flexible URL routing system with REST support.
+
+-   fully WSGI compatible
+
+
+Development Version
+-------------------
+
+The `Werkzeug tip <http://dev.pocoo.org/hg/werkzeug-main/archive/tip.zip#egg=Werkzeug-dev>`_
+is installable via `easy_install` with ``easy_install Werkzeug==dev``.
+"""
+import os
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+
+setup(
+    name='Werkzeug',
+    version='1.0',
+    url='http://werkzeug.pocoo.org/',
+    license='BSD',
+    author='Armin Ronacher',
+    author_email='armin.ronacher@active-4.com',
+    description='The Swiss Army knife of Python web development',
+    long_description=__doc__,
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Environment :: Web Environment',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: BSD License',
+        'Operating System :: OS Independent',
+        'Programming Language :: Python',
+        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
+        'Topic :: Software Development :: Libraries :: Python Modules'
+    ],
+    packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],
+    package_data={
+        'werkzeug.debug': ['shared/*', 'templates/*']
+    },
+    platforms='any'
+)
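
The long description above names request/response objects, URL routing and
the builtin development server as Werkzeug's core features.  As a rough
illustration of how those pieces fit together, here is a minimal sketch
against the bundled 0.6-era API (the `application` function itself is
hypothetical and not part of this changeset)::

    from werkzeug.wrappers import Request, Response
    from werkzeug.serving import run_simple

    @Request.application
    def application(request):
        # Request wraps the WSGI environ; a Response is itself a WSGI app.
        return Response('Hello from %s' % request.path)

    if __name__ == '__main__':
        # threading/forking-capable dev server with an automatic reloader
        run_simple('localhost', 4000, application, use_reloader=True)
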
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/__init__.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,157 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug
+    ~~~~~~~~
+
+    Werkzeug is the Swiss Army knife of Python web development.
+
+    It provides useful classes and functions for any WSGI application to make
+    the life of a Python web developer much easier.  All of the provided
+    classes are independent of each other so you can mix them with any other
+    library.
+
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from types import ModuleType
+import sys
+
+# This import magic often raises concerns, which is why the implementation
+# and motivation are explained here in detail now.
+#
+# The majority of the functions and classes provided by Werkzeug work on the
+# HTTP and WSGI layer.  There is no useful grouping for them, which is why
+# they are all importable from "werkzeug" instead of the modules where they are
+# implemented.  The downside is that everything would then be loaded at
+# once, even if unused.
+#
+# The implementation of a lazy-loading module in this file replaces the
+# werkzeug package when imported from within.  Attribute access to the werkzeug
+# module will then lazily import from the modules that implement the objects.
+
+
+# import mapping to objects in other modules
+all_by_module = {
+    'werkzeug.debug':       ['DebuggedApplication'],
+    'werkzeug.local':       ['Local', 'LocalManager', 'LocalProxy',
+                             'LocalStack', 'release_local'],
+    'werkzeug.templates':   ['Template'],
+    'werkzeug.serving':     ['run_simple'],
+    'werkzeug.test':        ['Client', 'EnvironBuilder', 'create_environ',
+                             'run_wsgi_app'],
+    'werkzeug.testapp':     ['test_app'],
+    'werkzeug.exceptions':  ['abort', 'Aborter'],
+    'werkzeug.urls':        ['url_decode', 'url_encode', 'url_quote',
+                             'url_quote_plus', 'url_unquote',
+                             'url_unquote_plus', 'url_fix', 'Href',
+                             'iri_to_uri', 'uri_to_iri'],
+    'werkzeug.formparser':  ['parse_form_data'],
+    'werkzeug.utils':       ['escape', 'environ_property', 'cookie_date',
+                             'http_date', 'append_slash_redirect', 'redirect',
+                             'cached_property', 'import_string',
+                             'dump_cookie', 'parse_cookie', 'unescape',
+                             'format_string', 'find_modules', 'header_property',
+                             'html', 'xhtml', 'HTMLBuilder',
+                             'validate_arguments', 'ArgumentValidationError',
+                             'bind_arguments', 'secure_filename'],
+    'werkzeug.wsgi':        ['get_current_url', 'get_host', 'pop_path_info',
+                             'peek_path_info', 'SharedDataMiddleware',
+                             'DispatcherMiddleware', 'ClosingIterator',
+                             'FileWrapper', 'make_line_iter', 'LimitedStream',
+                             'responder', 'wrap_file', 'extract_path_info'],
+    'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
+                             'EnvironHeaders', 'ImmutableList',
+                             'ImmutableDict', 'ImmutableMultiDict',
+                             'TypeConversionDict', 'ImmutableTypeConversionDict',
+                             'Accept', 'MIMEAccept', 'CharsetAccept',
+                             'LanguageAccept', 'RequestCacheControl',
+                             'ResponseCacheControl', 'ETags', 'HeaderSet',
+                             'WWWAuthenticate', 'Authorization',
+                             'FileMultiDict', 'CallbackDict', 'FileStorage',
+                             'OrderedMultiDict', 'ImmutableOrderedMultiDict'],
+    'werkzeug.useragents':  ['UserAgent'],
+    'werkzeug.http':        ['parse_etags', 'parse_date', 'parse_cache_control_header',
+                             'is_resource_modified', 'parse_accept_header',
+                             'parse_set_header', 'quote_etag', 'unquote_etag',
+                             'generate_etag', 'dump_header',
+                             'parse_list_header', 'parse_dict_header',
+                             'parse_authorization_header',
+                             'parse_www_authenticate_header',
+                             'remove_entity_headers', 'is_entity_header',
+                             'remove_hop_by_hop_headers', 'parse_options_header',
+                             'dump_options_header', 'is_hop_by_hop_header',
+                             'unquote_header_value',
+                             'quote_header_value', 'HTTP_STATUS_CODES'],
+    'werkzeug.wrappers':    ['BaseResponse', 'BaseRequest', 'Request',
+                             'Response', 'AcceptMixin', 'ETagRequestMixin',
+                             'ETagResponseMixin', 'ResponseStreamMixin',
+                             'CommonResponseDescriptorsMixin',
+                             'UserAgentMixin', 'AuthorizationMixin',
+                             'WWWAuthenticateMixin',
+                             'CommonRequestDescriptorsMixin'],
+    'werkzeug.security':    ['generate_password_hash', 'check_password_hash'],
+    # the undocumented easteregg ;-)
+    'werkzeug._internal':   ['_easteregg']
+}
+
+# modules that should be imported when accessed as attributes of werkzeug
+attribute_modules = frozenset(['exceptions', 'routing', 'script'])
+
+
+object_origins = {}
+for module, items in all_by_module.iteritems():
+    for item in items:
+        object_origins[item] = module
+
+
+#: the cached version of the library.  We get the distribution from
+#: pkg_resources the first time this attribute is accessed.  Because
+#: this operation is quite slow, caching the result speeds up importing a lot.
+version = None
+
+class module(ModuleType):
+    """Automatically import objects from the modules."""
+
+    def __getattr__(self, name):
+        if name in object_origins:
+            module = __import__(object_origins[name], None, None, [name])
+            for extra_name in all_by_module[module.__name__]:
+                setattr(self, extra_name, getattr(module, extra_name))
+            return getattr(module, name)
+        elif name in attribute_modules:
+            __import__('werkzeug.' + name)
+        return ModuleType.__getattribute__(self, name)
+
+    def __dir__(self):
+        """Just show what we want to show."""
+        result = list(new_module.__all__)
+        result.extend(('__file__', '__path__', '__doc__', '__all__',
+                       '__docformat__', '__name__', '__package__',
+                       '__version__'))
+        return result
+
+    @property
+    def __version__(self):
+        global version
+        if version is None:
+            try:
+                version = __import__('pkg_resources') \
+                          .get_distribution('Werkzeug').version
+            except:
+                version = 'unknown'
+        return version
+
+# keep a reference to this module so that it's not garbage collected
+old_module = sys.modules['werkzeug']
+
+
+# setup the new module and patch it into the dict of loaded modules
+new_module = sys.modules['werkzeug'] = module('werkzeug')
+new_module.__dict__.update({
+    '__file__':         __file__,
+    '__path__':         __path__,
+    '__doc__':          __doc__,
+    '__all__':          tuple(object_origins) + tuple(attribute_modules),
+    '__docformat__':    'restructuredtext en'
+})
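
To see the lazy loading above in action: importing `werkzeug` merely installs
the `module` subclass defined in this file, and the first attribute access
imports the implementing module and caches every name it contributes.  A small
sketch based on the `all_by_module` mapping shown above::

    import werkzeug

    # 'url_quote' is mapped to werkzeug.urls; this first access triggers
    # __getattr__, imports werkzeug.urls and copies its exported names
    # onto the werkzeug module object.
    quoted = werkzeug.url_quote(u'some value')

    # sibling names from the same module are now cached on the package
    assert 'url_encode' in werkzeug.__dict__
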
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/_internal.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug._internal
+    ~~~~~~~~~~~~~~~~~~
+
+    This module provides internally used helpers and constants.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import inspect
+from weakref import WeakKeyDictionary
+from cStringIO import StringIO
+from Cookie import BaseCookie, Morsel, CookieError
+from time import gmtime
+from datetime import datetime, date
+
+
+_logger = None
+_empty_stream = StringIO('')
+_signature_cache = WeakKeyDictionary()
+_epoch_ord = date(1970, 1, 1).toordinal()
+
+
+HTTP_STATUS_CODES = {
+    100:    'Continue',
+    101:    'Switching Protocols',
+    102:    'Processing',
+    200:    'OK',
+    201:    'Created',
+    202:    'Accepted',
+    203:    'Non Authoritative Information',
+    204:    'No Content',
+    205:    'Reset Content',
+    206:    'Partial Content',
+    207:    'Multi Status',
+    226:    'IM Used',              # see RFC 3229
+    300:    'Multiple Choices',
+    301:    'Moved Permanently',
+    302:    'Found',
+    303:    'See Other',
+    304:    'Not Modified',
+    305:    'Use Proxy',
+    307:    'Temporary Redirect',
+    400:    'Bad Request',
+    401:    'Unauthorized',
+    402:    'Payment Required',     # unused
+    403:    'Forbidden',
+    404:    'Not Found',
+    405:    'Method Not Allowed',
+    406:    'Not Acceptable',
+    407:    'Proxy Authentication Required',
+    408:    'Request Timeout',
+    409:    'Conflict',
+    410:    'Gone',
+    411:    'Length Required',
+    412:    'Precondition Failed',
+    413:    'Request Entity Too Large',
+    414:    'Request URI Too Long',
+    415:    'Unsupported Media Type',
+    416:    'Requested Range Not Satisfiable',
+    417:    'Expectation Failed',
+    418:    'I\'m a teapot',        # see RFC 2324
+    422:    'Unprocessable Entity',
+    423:    'Locked',
+    424:    'Failed Dependency',
+    426:    'Upgrade Required',
+    449:    'Retry With',           # proprietary MS extension
+    500:    'Internal Server Error',
+    501:    'Not Implemented',
+    502:    'Bad Gateway',
+    503:    'Service Unavailable',
+    504:    'Gateway Timeout',
+    505:    'HTTP Version Not Supported',
+    507:    'Insufficient Storage',
+    510:    'Not Extended'
+}
+
+
+class _Missing(object):
+
+    def __repr__(self):
+        return 'no value'
+
+    def __reduce__(self):
+        return '_missing'
+
+_missing = _Missing()
+
+
+def _proxy_repr(cls):
+    def proxy_repr(self):
+        return '%s(%s)' % (self.__class__.__name__, cls.__repr__(self))
+    return proxy_repr
+
+
+def _get_environ(obj):
+    env = getattr(obj, 'environ', obj)
+    assert isinstance(env, dict), \
+        '%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
+    return env
+
+
+def _log(type, message, *args, **kwargs):
+    """Log into the internal werkzeug logger."""
+    global _logger
+    if _logger is None:
+        import logging
+        _logger = logging.getLogger('werkzeug')
+        # Only set up a default log handler if the
+        # end-user application didn't set anything up.
+        if not logging.root.handlers and _logger.level == logging.NOTSET:
+            _logger.setLevel(logging.INFO)
+            handler = logging.StreamHandler()
+            _logger.addHandler(handler)
+    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
+
+
+def _parse_signature(func):
+    """Return a signature object for the function."""
+    if hasattr(func, 'im_func'):
+        func = func.im_func
+
+    # if we have a cached validator for this function, return it
+    parse = _signature_cache.get(func)
+    if parse is not None:
+        return parse
+
+    # inspect the function signature and collect all the information
+    positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func)
+    defaults = defaults or ()
+    arg_count = len(positional)
+    arguments = []
+    for idx, name in enumerate(positional):
+        if isinstance(name, list):
+            raise TypeError('cannot parse functions that unpack tuples '
+                            'in the function signature')
+        try:
+            default = defaults[idx - arg_count]
+        except IndexError:
+            param = (name, False, None)
+        else:
+            param = (name, True, default)
+        arguments.append(param)
+    arguments = tuple(arguments)
+
+    def parse(args, kwargs):
+        new_args = []
+        missing = []
+        extra = {}
+
+        # consume as many arguments as positional as possible
+        for idx, (name, has_default, default) in enumerate(arguments):
+            try:
+                new_args.append(args[idx])
+            except IndexError:
+                try:
+                    new_args.append(kwargs.pop(name))
+                except KeyError:
+                    if has_default:
+                        new_args.append(default)
+                    else:
+                        missing.append(name)
+            else:
+                if name in kwargs:
+                    extra[name] = kwargs.pop(name)
+
+        # handle extra arguments
+        extra_positional = args[arg_count:]
+        if vararg_var is not None:
+            new_args.extend(extra_positional)
+            extra_positional = ()
+        if kwargs and kwarg_var is None:
+            extra.update(kwargs)
+            kwargs = {}
+
+        return new_args, kwargs, missing, extra, extra_positional, \
+               arguments, vararg_var, kwarg_var
+    _signature_cache[func] = parse
+    return parse
+
+
+def _patch_wrapper(old, new):
+    """Helper function that forwards all the function details to the
+    decorated function."""
+    try:
+        new.__name__ = old.__name__
+        new.__module__ = old.__module__
+        new.__doc__ = old.__doc__
+        new.__dict__ = old.__dict__
+    except:
+        pass
+    return new
+
+
+def _decode_unicode(value, charset, errors):
+    """Like the regular decode function but this one raises an
+    `HTTPUnicodeError` if errors is `strict`."""
+    fallback = None
+    if errors.startswith('fallback:'):
+        fallback = errors[9:]
+        errors = 'strict'
+    try:
+        return value.decode(charset, errors)
+    except UnicodeError, e:
+        if fallback is not None:
+            return value.decode(fallback, 'ignore')
+        from werkzeug.exceptions import HTTPUnicodeError
+        raise HTTPUnicodeError(str(e))
+
+
+def _iter_modules(path):
+    """Iterate over all modules in a package."""
+    import os
+    import pkgutil
+    if hasattr(pkgutil, 'iter_modules'):
+        for importer, modname, ispkg in pkgutil.iter_modules(path):
+            yield modname, ispkg
+        return
+    from inspect import getmodulename
+    from pydoc import ispackage
+    found = set()
+    for path in path:
+        for filename in os.listdir(path):
+            p = os.path.join(path, filename)
+            modname = getmodulename(filename)
+            if modname and modname != '__init__':
+                if modname not in found:
+                    found.add(modname)
+                    yield modname, ispackage(modname)
+
+
+def _dump_date(d, delim):
+    """Used for `http_date` and `cookie_date`."""
+    if d is None:
+        d = gmtime()
+    elif isinstance(d, datetime):
+        d = d.utctimetuple()
+    elif isinstance(d, (int, long, float)):
+        d = gmtime(d)
+    return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
+        ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
+        d.tm_mday, delim,
+        ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
+         'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
+        delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
+    )
+
+
+def _date_to_unix(arg):
+    """Converts a timetuple, integer or datetime object into the seconds from
+    epoch in utc.
+    """
+    if isinstance(arg, datetime):
+        arg = arg.utctimetuple()
+    elif isinstance(arg, (int, long, float)):
+        return int(arg)
+    year, month, day, hour, minute, second = arg[:6]
+    days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
+    hours = days * 24 + hour
+    minutes = hours * 60 + minute
+    seconds = minutes * 60 + second
+    return seconds
+
+
+class _ExtendedMorsel(Morsel):
+    _reserved = {'httponly': 'HttpOnly'}
+    _reserved.update(Morsel._reserved)
+
+    def __init__(self, name=None, value=None):
+        Morsel.__init__(self)
+        if name is not None:
+            self.set(name, value, value)
+
+    def OutputString(self, attrs=None):
+        httponly = self.pop('httponly', False)
+        result = Morsel.OutputString(self, attrs).rstrip('\t ;')
+        if httponly:
+            result += '; HttpOnly'
+        return result
+
+
+class _ExtendedCookie(BaseCookie):
+    """Form of the base cookie that doesn't raise a `CookieError` for
+    malformed keys.  This has the advantage that broken cookies submitted
+    by nonstandard browsers don't cause the cookie to be empty.
+    """
+
+    def _BaseCookie__set(self, key, real_value, coded_value):
+        morsel = self.get(key, _ExtendedMorsel())
+        try:
+            morsel.set(key, real_value, coded_value)
+        except CookieError:
+            pass
+        dict.__setitem__(self, key, morsel)
+
+
+class _DictAccessorProperty(object):
+    """Baseclass for `environ_property` and `header_property`."""
+    read_only = False
+
+    def __init__(self, name, default=None, load_func=None, dump_func=None,
+                 read_only=None, doc=None):
+        self.name = name
+        self.default = default
+        self.load_func = load_func
+        self.dump_func = dump_func
+        if read_only is not None:
+            self.read_only = read_only
+        self.__doc__ = doc
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        storage = self.lookup(obj)
+        if self.name not in storage:
+            return self.default
+        rv = storage[self.name]
+        if self.load_func is not None:
+            try:
+                rv = self.load_func(rv)
+            except (ValueError, TypeError):
+                rv = self.default
+        return rv
+
+    def __set__(self, obj, value):
+        if self.read_only:
+            raise AttributeError('read only property')
+        if self.dump_func is not None:
+            value = self.dump_func(value)
+        self.lookup(obj)[self.name] = value
+
+    def __delete__(self, obj):
+        if self.read_only:
+            raise AttributeError('read only property')
+        self.lookup(obj).pop(self.name, None)
+
+    def __repr__(self):
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            self.name
+        )
+
+
+def _easteregg(app):
+    """Like the name says.  But who knows how it works?"""
+    gyver = '\n'.join([x + (77 - len(x)) * ' ' for x in '''
+eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
+9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
+4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
+jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
+q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
+jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
+8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
+v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
+XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
+LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
+iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
+tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
+1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
+GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
+Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
+QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
+8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
+jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
+DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
+MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
+GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
+RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
+Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
+NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
+pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
+sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
+p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
+krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
+nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
+mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
+7f2zLkGNv8b191cD/3vs9Q833z8t'''.decode('base64').decode('zlib').splitlines()])
+    def easteregged(environ, start_response):
+        def injecting_start_response(status, headers, exc_info=None):
+            headers.append(('X-Powered-By', 'Werkzeug'))
+            return start_response(status, headers, exc_info)
+        if environ.get('QUERY_STRING') != 'macgybarchakku':
+            return app(environ, injecting_start_response)
+        injecting_start_response('200 OK', [('Content-Type', 'text/html')])
+        return ['''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
+<title>About Werkzeug</>
+<style type="text/css">
+  body { font: 15px Georgia, serif; text-align: center; }
+  a { color: #333; text-decoration: none; }
+  h1 { font-size: 30px; margin: 20px 0 10px 0; }
+  p { margin: 0 0 30px 0; }
+  pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
+</style>
+<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
+<p>the Swiss Army knife of Python web development.
+<pre>%s\n\n\n</>''' % gyver]
+    return easteregged
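
As a reference for the `_parse_signature` helper above (its return shape can
be read off the code directly): the returned parser splits a call into
consumed and leftover pieces.  A small sketch using a hypothetical function
`f`; note that `werkzeug._internal` is an internal module, so relying on it
in real code is undefined behaviour per the changelog above::

    from werkzeug._internal import _parse_signature

    def f(a, b=2):
        return a + b

    parse = _parse_signature(f)
    new_args, kwargs, missing, extra, extra_positional, \
        arguments, vararg_var, kwarg_var = parse((1,), {'c': 3})

    # a=1 is consumed positionally, b falls back to its default, and the
    # unknown keyword 'c' lands in `extra` because f accepts no **kwargs.
    assert new_args == [1, 2]
    assert extra == {'c': 3}
    assert missing == [] and extra_positional == ()
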
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/__init__.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib
+    ~~~~~~~~~~~~~~~~
+
+    Contains user-submitted code that other users may find useful, but which
+    is not part of the Werkzeug core.  Anyone can write code for inclusion in
+    the `contrib` package.  All modules in this package are distributed as an
+    add-on library and thus are not part of Werkzeug itself.
+
+    This file itself is mostly for informational purposes and to tell the
+    Python interpreter that `contrib` is a package.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/atom.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,343 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.atom
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides a class called :class:`AtomFeed` which can be
+    used to generate feeds in the Atom syndication format (see :rfc:`4287`).
+
+    Example::
+
+        def atom_feed(request):
+            feed = AtomFeed("My Blog", feed_url=request.url,
+                            url=request.host_url,
+                            subtitle="My example blog for a feed test.")
+            for post in Post.query.limit(10).all():
+                feed.add(post.title, post.body, content_type='html',
+                         author=post.author, url=post.url, id=post.uid,
+                         updated=post.last_update, published=post.pub_date)
+            return feed.get_response()
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from datetime import datetime
+from werkzeug.utils import escape
+from werkzeug.wrappers import BaseResponse
+
+
+XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
+
+
+def _make_text_block(name, content, content_type=None):
+    """Helper function for the builder that creates an XML text block."""
+    if content_type == 'xhtml':
+        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
+               (name, XHTML_NAMESPACE, content, name)
+    if not content_type:
+        return u'<%s>%s</%s>\n' % (name, escape(content), name)
+    return u'<%s type="%s">%s</%s>\n' % (name, content_type,
+                                         escape(content), name)
+
+
+def format_iso8601(obj):
+    """Format a datetime object for iso8601"""
+    return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+
+class AtomFeed(object):
+    """A helper class that creates Atom feeds.
+
+    :param title: the title of the feed. Required.
+    :param title_type: the type attribute for the title element.  One of
+                       ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param url: the url for the feed (not the url *of* the feed)
+    :param id: a globally unique id for the feed.  Must be a URI.  If
+               not present the `feed_url` is used, but one of the two is
+               required.
+    :param updated: the time the feed was modified the last time.  Must
+                    be a :class:`datetime.datetime` object.  If not
+                    present the latest entry's `updated` is used.
+    :param feed_url: the URL to the feed.  Should be the URL that was
+                     requested.
+    :param author: the author of the feed.  Must be either a string (the
+                   name) or a dict with name (required) and uri or
+                   email (both optional).  Can also be a list of
+                   (possibly mixed) strings and dicts if there are
+                   multiple authors.  Required if not every entry has an
+                   author element.
+    :param icon: an icon for the feed.
+    :param logo: a logo for the feed.
+    :param rights: copyright information for the feed.
+    :param rights_type: the type attribute for the rights element.  One of
+                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
+                        ``'text'``.
+    :param subtitle: a short description of the feed.
+    :param subtitle_type: the type attribute for the subtitle element.
+                          One of ``'text'``, ``'html'`` or ``'xhtml'``.
+                          Default is ``'text'``.
+    :param links: additional links.  Must be a list of dictionaries with
+                  href (required) and rel, type, hreflang, title, length
+                  (all optional)
+    :param generator: the software that generated this feed.  This must be
+                      a tuple in the form ``(name, url, version)``.  If
+                      you don't want to specify one of them, set the item
+                      to `None`.
+    :param entries: a list with the entries for the feed. Entries can also
+                    be added later with :meth:`add`.
+
+    For more information on the elements see
+    http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is demanded, any iterable can be used.
+    """
+
+    default_generator = ('Werkzeug', None, None)
+
+    def __init__(self, title=None, entries=None, **kwargs):
+        self.title = title
+        self.title_type = kwargs.get('title_type', 'text')
+        self.url = kwargs.get('url')
+        self.feed_url = kwargs.get('feed_url', self.url)
+        self.id = kwargs.get('id', self.feed_url)
+        self.updated = kwargs.get('updated')
+        self.author = kwargs.get('author', ())
+        self.icon = kwargs.get('icon')
+        self.logo = kwargs.get('logo')
+        self.rights = kwargs.get('rights')
+        self.rights_type = kwargs.get('rights_type')
+        self.subtitle = kwargs.get('subtitle')
+        self.subtitle_type = kwargs.get('subtitle_type', 'text')
+        self.generator = kwargs.get('generator')
+        if self.generator is None:
+            self.generator = self.default_generator
+        self.links = kwargs.get('links', [])
+        self.entries = entries and list(entries) or []
+
+        if not hasattr(self.author, '__iter__') \
+           or isinstance(self.author, (basestring, dict)):
+            self.author = [self.author]
+        for i, author in enumerate(self.author):
+            if not isinstance(author, dict):
+                self.author[i] = {'name': author}
+
+        if not self.title:
+            raise ValueError('title is required')
+        if not self.id:
+            raise ValueError('id is required')
+        for author in self.author:
+            if 'name' not in author:
+                raise TypeError('author must contain at least a name')
+
+    def add(self, *args, **kwargs):
+        """Add a new entry to the feed.  This function can either be called
+        with a :class:`FeedEntry` or some keyword and positional arguments
+        that are forwarded to the :class:`FeedEntry` constructor.
+        """
+        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
+            self.entries.append(args[0])
+        else:
+            kwargs['feed_url'] = self.feed_url
+            self.entries.append(FeedEntry(*args, **kwargs))
+
+    def __repr__(self):
+        return '<%s %r (%d entries)>' % (
+            self.__class__.__name__,
+            self.title,
+            len(self.entries)
+        )
+
+    def generate(self):
+        """Return a generator that yields pieces of XML."""
+        # atom demands either an author element in every entry or a global one
+        if not self.author:
+            if any(not e.author for e in self.entries):
+                self.author = ({'name': u'Unknown author'},)
+
+        if not self.updated:
+            dates = sorted([entry.updated for entry in self.entries])
+            self.updated = dates and dates[-1] or datetime.utcnow()
+
+        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
+        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
+        yield '  ' + _make_text_block('title', self.title, self.title_type)
+        yield u'  <id>%s</id>\n' % escape(self.id)
+        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
+        if self.url:
+            yield u'  <link href="%s" />\n' % escape(self.url, True)
+        if self.feed_url:
+            yield u'  <link href="%s" rel="self" />\n' % \
+                escape(self.feed_url, True)
+        for link in self.links:
+            yield u'  <link %s/>\n' % ''.join('%s="%s" ' % \
+                (k, escape(link[k], True)) for k in link)
+        for author in self.author:
+            yield u'  <author>\n'
+            yield u'    <name>%s</name>\n' % escape(author['name'])
+            if 'uri' in author:
+                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
+            if 'email' in author:
+                yield '    <email>%s</email>\n' % escape(author['email'])
+            yield '  </author>\n'
+        if self.subtitle:
+            yield '  ' + _make_text_block('subtitle', self.subtitle,
+                                          self.subtitle_type)
+        if self.icon:
+            yield u'  <icon>%s</icon>\n' % escape(self.icon)
+        if self.logo:
+            yield u'  <logo>%s</logo>\n' % escape(self.logo)
+        if self.rights:
+            yield '  ' + _make_text_block('rights', self.rights,
+                                          self.rights_type)
+        generator_name, generator_url, generator_version = self.generator
+        if generator_name or generator_url or generator_version:
+            tmp = [u'  <generator']
+            if generator_url:
+                tmp.append(u' uri="%s"' % escape(generator_url, True))
+            if generator_version:
+                tmp.append(u' version="%s"' % escape(generator_version, True))
+            tmp.append(u'>%s</generator>\n' % escape(generator_name))
+            yield u''.join(tmp)
+        for entry in self.entries:
+            for line in entry.generate():
+                yield u'  ' + line
+        yield u'</feed>\n'
+
+    def to_string(self):
+        """Convert the feed into a string."""
+        return u''.join(self.generate())
+
+    def get_response(self):
+        """Return a response object for the feed."""
+        return BaseResponse(self.to_string(), mimetype='application/atom+xml')
+
+    def __call__(self, environ, start_response):
+        """Use the class as WSGI response object."""
+        return self.get_response()(environ, start_response)
+
+    def __unicode__(self):
+        return self.to_string()
+
+    def __str__(self):
+        return self.to_string().encode('utf-8')
+
+
+class FeedEntry(object):
+    """Represents a single entry in a feed.
+
+    :param title: the title of the entry. Required.
+    :param title_type: the type attribute for the title element.  One of
+                       ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param content: the content of the entry.
+    :param content_type: the type attribute for the content element.  One
+                         of ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param summary: a summary of the entry's content.
+    :param summary_type: the type attribute for the summary element.  One
+                         of ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param url: the url for the entry.
+    :param id: a globally unique id for the entry.  Must be a URI.  If
+               not present the URL is used, but one of the two is required.
+    :param updated: the time the entry was last modified.  Must be a
+                    :class:`datetime.datetime` object.  Required.
+    :param author: the author of the entry.  Must be either a string (the
+                   name) or a dict with name (required) and uri or
+                   email (both optional).  Can also be a list of
+                   (possibly mixed) strings and dicts if there are
+                   multiple authors.  Required if the feed itself does
+                   not have an author element.
+    :param published: the time the entry was initially published.  Must
+                      be a :class:`datetime.datetime` object.
+    :param rights: copyright information for the entry.
+    :param rights_type: the type attribute for the rights element.  One of
+                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
+                        ``'text'``.
+    :param links: additional links.  Must be a list of dictionaries with
+                  href (required) and rel, type, hreflang, title, length
+                  (all optional)
+    :param xml_base: The xml base (url) for this feed item.  If not provided
+                     it will default to the item url.
+
+    For more information on the elements see
+    http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is expected, any iterable can be used.
+    """
+
+    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
+        self.title = title
+        self.title_type = kwargs.get('title_type', 'text')
+        self.content = content
+        self.content_type = kwargs.get('content_type', 'html')
+        self.url = kwargs.get('url')
+        self.id = kwargs.get('id', self.url)
+        self.updated = kwargs.get('updated')
+        self.summary = kwargs.get('summary')
+        self.summary_type = kwargs.get('summary_type', 'html')
+        self.author = kwargs.get('author', ())
+        self.published = kwargs.get('published')
+        self.rights = kwargs.get('rights')
+        self.links = kwargs.get('links', [])
+        self.xml_base = kwargs.get('xml_base', feed_url)
+
+        if not hasattr(self.author, '__iter__') \
+           or isinstance(self.author, (basestring, dict)):
+            self.author = [self.author]
+        for i, author in enumerate(self.author):
+            if not isinstance(author, dict):
+                self.author[i] = {'name': author}
+
+        if not self.title:
+            raise ValueError('title is required')
+        if not self.id:
+            raise ValueError('id is required')
+        if not self.updated:
+            raise ValueError('updated is required')
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.title
+        )
+
+    def generate(self):
+        """Yields pieces of ATOM XML."""
+        base = ''
+        if self.xml_base:
+            base = ' xml:base="%s"' % escape(self.xml_base, True)
+        yield u'<entry%s>\n' % base
+        yield u'  ' + _make_text_block('title', self.title, self.title_type)
+        yield u'  <id>%s</id>\n' % escape(self.id)
+        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
+        if self.published:
+            yield u'  <published>%s</published>\n' % \
+                  format_iso8601(self.published)
+        if self.url:
+            yield u'  <link href="%s" />\n' % escape(self.url, True)
+        for author in self.author:
+            yield u'  <author>\n'
+            yield u'    <name>%s</name>\n' % escape(author['name'])
+            if 'uri' in author:
+                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
+            if 'email' in author:
+                yield u'    <email>%s</email>\n' % escape(author['email'])
+            yield u'  </author>\n'
+        for link in self.links:
+            yield u'  <link %s/>\n' % ''.join('%s="%s" ' % \
+                (k, escape(link[k], True)) for k in link)
+        if self.summary:
+            yield u'  ' + _make_text_block('summary', self.summary,
+                                           self.summary_type)
+        if self.content:
+            yield u'  ' + _make_text_block('content', self.content,
+                                           self.content_type)
+        yield u'</entry>\n'
+
+    def to_string(self):
+        """Convert the feed item into a unicode object."""
+        return u''.join(self.generate())
+
+    def __unicode__(self):
+        return self.to_string()
+
+    def __str__(self):
+        return self.to_string().encode('utf-8')
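
A minimal, hedged usage sketch for the two classes above (not part of the
bundled code; the titles, URLs and author name are made up)::

    from datetime import datetime
    from werkzeug.contrib.atom import AtomFeed

    feed = AtomFeed('My Blog', url='http://example.com/',
                    feed_url='http://example.com/feed.atom',
                    author='John Doe')
    # add() forwards keyword arguments to the FeedEntry constructor
    feed.add('First post', 'Hello <b>world</b>!', content_type='html',
             url='http://example.com/posts/1', author='John Doe',
             updated=datetime.utcnow())
    print feed.to_string()      # the complete feed as unicode XML

The feed object can also be returned directly as a WSGI application, or
wrapped into a response object via ``get_response()``.
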
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/cache.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,511 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.cache
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    The main problem with dynamic Web sites is, well, they're dynamic.  Each
+    time a user requests a page, the webserver executes a lot of code, queries
+    the database and renders templates until the visitor gets the page they see.
+
+    This is a lot more expensive than just loading a file from the file system
+    and sending it to the visitor.
+
+    For most Web applications this overhead isn't a big deal, but once it
+    becomes one, you will be glad to have a cache system in place.
+
+    How Caching Works
+    =================
+
+    Caching is pretty simple.  Basically you have a cache object lurking around
+    somewhere that is connected to a remote cache or the file system or
+    something else.  When the request comes in you check if the current page
+    is already in the cache and if so, you return it.  Otherwise you generate
+    the page and put it into the cache.  (Or a fragment of the page; you don't
+    have to cache the full thing.)
+
+    Here is a simple example of how to cache a sidebar for a template::
+
+        def get_sidebar(user):
+            identifier = 'sidebar_for/user%d' % user.id
+            value = cache.get(identifier)
+            if value is not None:
+                return value
+            value = generate_sidebar_for(user=user)
+            cache.set(identifier, value, timeout=60 * 5)
+            return value
+
+    Creating a Cache Object
+    =======================
+
+    To create a cache object you just import the cache system of your choice
+    from the cache module and instantiate it.  Then you can start working
+    with that object:
+
+    >>> from werkzeug.contrib.cache import SimpleCache
+    >>> c = SimpleCache()
+    >>> c.set("foo", "value")
+    >>> c.get("foo")
+    'value'
+    >>> c.get("missing") is None
+    True
+
+    Please keep in mind that you have to create the cache and put it somewhere
+    you have access to it (either as a module global you can import or as an
+    attribute of your WSGI application).
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import os
+import re
+try:
+    from hashlib import md5
+except ImportError:
+    from md5 import new as md5
+from itertools import izip
+from time import time
+from cPickle import loads, dumps, load, dump, HIGHEST_PROTOCOL
+
+
+class BaseCache(object):
+    """Baseclass for the cache systems.  All the cache systems implement this
+    API or a superset of it.
+
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`set`.
+    """
+
+    def __init__(self, default_timeout=300):
+        self.default_timeout = default_timeout
+
+    def get(self, key):
+        """Looks up key in the cache and returns it.  If the key does not
+        exist `None` is returned instead.
+
+        :param key: the key to be looked up.
+        """
+        return None
+
+    def delete(self, key):
+        """Deletes `key` from the cache.  If it does not exist in the cache
+        nothing happens.
+
+        :param key: the key to delete.
+        """
+        pass
+
+    def get_many(self, *keys):
+        """Returns a list of keys.  For each key a item in the list is
+        created.  Example::
+
+            foo, bar = cache.get_many("foo", "bar")
+
+        If a key can't be looked up `None` is returned for that key
+        instead.
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
+        return map(self.get, keys)
+
+    def get_dict(self, *keys):
+        """Works like :meth:`get_many` but returns a dict::
+
+            d = cache.get_dict("foo", "bar")
+            foo = d["foo"]
+            bar = d["bar"]
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
+        return dict(izip(keys, self.get_many(*keys)))
+
+    def set(self, key, value, timeout=None):
+        """Adds or overrides a key in the cache.
+
+        :param key: the key to set
+        :param value: the value for the key
+        :param timeout: the cache timeout for the key or the default
+                        timeout if not specified.
+        """
+        pass
+
+    def add(self, key, value, timeout=None):
+        """Works like :meth:`set` but does not override already existing
+        values.
+
+        :param key: the key to set
+        :param value: the value for the key
+        :param timeout: the cache timeout for the key or the default
+                        timeout if not specified.
+        """
+        pass
+
+    def set_many(self, mapping, timeout=None):
+        """Sets multiple keys and values from a dict.
+
+        :param mapping: a dict with the values to set.
+        :param timeout: the cache timeout for the key or the default
+                        timeout if not specified.
+        """
+        for key, value in mapping.iteritems():
+            self.set(key, value, timeout)
+
+    def delete_many(self, *keys):
+        """Deletes multiple keys at once.
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
+        for key in keys:
+            self.delete(key)
+
+    def clear(self):
+        """Clears the cache.  Keep in mind that not all caches support
+        clearing of the full cache.
+        """
+        pass
+
+    def inc(self, key, delta=1):
+        """Increments the value of a key by `delta`.  If the key does
+        not yet exist it is initialized with `delta`.
+
+        For supporting caches this is an atomic operation.
+
+        :param key: the key to increment.
+        :param delta: the delta to add.
+        """
+        self.set(key, (self.get(key) or 0) + delta)
+
+    def dec(self, key, delta=1):
+        """Decrements the value of a key by `delta`.  If the key does
+        not yet exist it is initialized with `-delta`.
+
+        For supporting caches this is an atomic operation.
+
+        :param key: the key to decrement.
+        :param delta: the delta to subtract.
+        """
+        self.set(key, (self.get(key) or 0) - delta)
+
+
+class NullCache(BaseCache):
+    """A cache that doesn't cache.  This can be useful for unit testing.
+
+    :param default_timeout: a dummy parameter that is ignored but exists
+                            for API compatibility with other caches.
+    """
+
+
+class SimpleCache(BaseCache):
+    """Simple memory cache for single process environments.  This class exists
+    mainly for the development server and is not 100% thread safe.  It tries
+    to use as many atomic operations as possible and no locks for simplicity,
+    but under heavy load it could happen that keys are added multiple times.
+
+    :param threshold: the maximum number of items the cache stores before
+                      it starts deleting some.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
+    """
+
+    def __init__(self, threshold=500, default_timeout=300):
+        BaseCache.__init__(self, default_timeout)
+        self._cache = {}
+        self.clear = self._cache.clear
+        self._threshold = threshold
+
+    def _prune(self):
+        if len(self._cache) > self._threshold:
+            now = time()
+            for idx, (key, (expires, _)) in enumerate(self._cache.items()):
+                if expires <= now or idx % 3 == 0:
+                    self._cache.pop(key, None)
+
+    def get(self, key):
+        now = time()
+        expires, value = self._cache.get(key, (0, None))
+        if expires > now:
+            return loads(value)
+
+    def set(self, key, value, timeout=None):
+        if timeout is None:
+            timeout = self.default_timeout
+        self._prune()
+        self._cache[key] = (time() + timeout, dumps(value, HIGHEST_PROTOCOL))
+
+    def add(self, key, value, timeout=None):
+        if timeout is None:
+            timeout = self.default_timeout
+        if len(self._cache) > self._threshold:
+            self._prune()
+        item = (time() + timeout, dumps(value, HIGHEST_PROTOCOL))
+        self._cache.setdefault(key, item)
+
+    def delete(self, key):
+        self._cache.pop(key, None)
+
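
A quick, hedged illustration of the SimpleCache API defined above (keys,
values and timeouts are arbitrary examples)::

    from werkzeug.contrib.cache import SimpleCache

    cache = SimpleCache(threshold=100, default_timeout=60)
    cache.set('user:1', {'name': 'admin'})      # uses the default timeout
    cache.add('user:1', {'name': 'other'})      # no-op, key already exists
    print cache.get('user:1')                   # {'name': 'admin'}
    print cache.get_dict('user:1', 'missing')   # missing keys map to None
    cache.delete('user:1')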
+
+_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match
+
+class MemcachedCache(BaseCache):
+    """A cache that uses memcached as backend.
+
+    The first argument can either be a list or tuple of server addresses
+    in which case Werkzeug tries to import the memcache module and connect
+    to it, or an object that resembles the API of a :class:`memcache.Client`.
+
+    Implementation notes:  This cache backend works around some limitations in
+    memcached to simplify the interface.  For example unicode keys are encoded
+    to utf-8 on the fly.  Methods such as :meth:`~BaseCache.get_dict` return
+    the keys in the same format as passed.  Furthermore all get methods
+    silently ignore key errors to not cause problems when untrusted user data
+    is passed to the get methods, which is often the case in web applications.
+
+    :param servers: a list or tuple of server addresses or alternatively
+                    a :class:`memcache.Client` or a compatible client.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
+    :param key_prefix: a prefix that is added before all keys.  This makes it
+                       possible to use the same memcached server for different
+                       applications.  Keep in mind that
+                       :meth:`~BaseCache.clear` will also clear keys with a
+                       different prefix.
+    """
+
+    def __init__(self, servers, default_timeout=300, key_prefix=None):
+        BaseCache.__init__(self, default_timeout)
+        if isinstance(servers, (list, tuple)):
+            try:
+                import cmemcache as memcache
+                is_cmemcache = True
+            except ImportError:
+                try:
+                    import memcache
+                    is_cmemcache = False
+                except ImportError:
+                    raise RuntimeError('no memcache module found')
+
+            # cmemcache has a bug that debuglog is not defined for the
+            # client.  Whenever pickle fails you get a weird AttributeError.
+            if is_cmemcache:
+                client = memcache.Client(map(str, servers))
+                try:
+                    client.debuglog = lambda *a: None
+                except:
+                    pass
+            else:
+                client = memcache.Client(servers, False, HIGHEST_PROTOCOL)
+        else:
+            client = servers
+
+        self._client = client
+        self.key_prefix = key_prefix
+
+    def get(self, key):
+        if isinstance(key, unicode):
+            key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
+        # memcached doesn't support keys longer than that.  Such overly
+        # long keys often come from untrusted user submitted data, so we
+        # fail silently on get instead of raising an error.
+        if _test_memcached_key(key):
+            return self._client.get(key)
+
+    def get_dict(self, *keys):
+        key_mapping = {}
+        have_encoded_keys = False
+        for idx, key in enumerate(keys):
+            if isinstance(key, unicode):
+                encoded_key = key.encode('utf-8')
+                have_encoded_keys = True
+            else:
+                encoded_key = key
+            if self.key_prefix:
+                encoded_key = self.key_prefix + encoded_key
+            if _test_memcached_key(key):
+                key_mapping[encoded_key] = key
+        # the keys() call here is important because otherwise cmemcache
+        # does ugly things with the passed object.  What exactly, I don't
+        # know; it appears to Py_DECREF it.
+        d = rv = self._client.get_multi(key_mapping.keys())
+        if have_encoded_keys or self.key_prefix:
+            rv = {}
+            for key, value in d.iteritems():
+                rv[key_mapping[key]] = value
+        if len(rv) < len(keys):
+            for key in keys:
+                if key not in rv:
+                    rv[key] = None
+        return rv
+
+    def add(self, key, value, timeout=None):
+        if timeout is None:
+            timeout = self.default_timeout
+        if isinstance(key, unicode):
+            key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
+        self._client.add(key, value, timeout)
+
+    def set(self, key, value, timeout=None):
+        if timeout is None:
+            timeout = self.default_timeout
+        if isinstance(key, unicode):
+            key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
+        self._client.set(key, value, timeout)
+
+    def get_many(self, *keys):
+        d = self.get_dict(*keys)
+        return [d[key] for key in keys]
+
+    def set_many(self, mapping, timeout=None):
+        if timeout is None:
+            timeout = self.default_timeout
+        new_mapping = {}
+        for key, value in mapping.iteritems():
+            if isinstance(key, unicode):
+                key = key.encode('utf-8')
+            if self.key_prefix:
+                key = self.key_prefix + key
+            new_mapping[key] = value
+        self._client.set_multi(new_mapping, timeout)
+
+    def delete(self, key):
+        if isinstance(key, unicode):
+            key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
+        if _test_memcached_key(key):
+            self._client.delete(key)
+
+    def delete_many(self, *keys):
+        new_keys = []
+        for key in keys:
+            if isinstance(key, unicode):
+                key = key.encode('utf-8')
+            if self.key_prefix:
+                key = self.key_prefix + key
+            if _test_memcached_key(key):
+                new_keys.append(key)
+        self._client.delete_multi(new_keys)
+
+    def clear(self):
+        self._client.flush_all()
+
+    def inc(self, key, delta=1):
+        if isinstance(key, unicode):
+            key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
+        self._client.incr(key, delta)
+
+    def dec(self, key, delta=1):
+        if isinstance(key, unicode):
+            key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
+        self._client.decr(key, delta)
+
+
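
A hedged connection sketch for the class above; it assumes a memcached
server listening on 127.0.0.1:11211 and the `memcache` (or `cmemcache`)
module being installed::

    from werkzeug.contrib.cache import MemcachedCache

    cache = MemcachedCache(['127.0.0.1:11211'], default_timeout=300,
                           key_prefix='myapp/')
    cache.set(u'greeting', 'hello')   # unicode keys are encoded to utf-8
    print cache.get(u'greeting')      # 'hello'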
+class GAEMemcachedCache(MemcachedCache):
+    """Connects to the Google appengine memcached Cache.
+
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
+    :param key_prefix: a prefix that is added before all keys.  This makes it
+                       possible to use the same memcached server for different
+                       applications.  Keep in mind that
+                       :meth:`~BaseCache.clear` will also clear keys with a
+                       different prefix.
+    """
+
+    def __init__(self, default_timeout=300, key_prefix=None):
+        from google.appengine.api import memcache
+        MemcachedCache.__init__(self, memcache.Client(),
+                                default_timeout, key_prefix)
+
+
+class FileSystemCache(BaseCache):
+    """A cache that stores the items on the file system.  This cache depends
+    on being the only user of the `cache_dir`.  Make absolutely sure that
+    nobody but this cache stores files there or otherwise the cache will
+    randomly delete files therein.
+
+    :param cache_dir: the directory where cached files are stored.
+    :param threshold: the maximum number of items the cache stores before
+                      it starts deleting some.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
+    """
+
+    def __init__(self, cache_dir, threshold=500, default_timeout=300):
+        BaseCache.__init__(self, default_timeout)
+        self._path = cache_dir
+        self._threshold = threshold
+        if not os.path.exists(self._path):
+            os.makedirs(self._path)
+
+    def _prune(self):
+        # os.listdir() already returns the hashed filenames, so work on
+        # the paths directly instead of re-hashing them via delete()
+        entries = os.listdir(self._path)
+        if len(entries) > self._threshold:
+            now = time()
+            for idx, fname in enumerate(entries):
+                filename = os.path.join(self._path, fname)
+                try:
+                    f = file(filename, 'rb')
+                    try:
+                        expired = load(f) <= now
+                    finally:
+                        f.close()
+                except Exception:
+                    expired = True
+                if expired or idx % 3 == 0:
+                    try:
+                        os.remove(filename)
+                    except (IOError, OSError):
+                        pass
+
+    def _get_filename(self, key):
+        hash = md5(key).hexdigest()
+        return os.path.join(self._path, hash)
+
+    def get(self, key):
+        filename = self._get_filename(key)
+        try:
+            f = file(filename, 'rb')
+            try:
+                if load(f) >= time():
+                    return load(f)
+            finally:
+                f.close()
+            os.remove(filename)
+        except:
+            return None
+
+    def add(self, key, value, timeout=None):
+        filename = self._get_filename(key)
+        if not os.path.exists(filename):
+            self.set(key, value, timeout)
+
+    def set(self, key, value, timeout=None):
+        if timeout is None:
+            timeout = self.default_timeout
+        filename = self._get_filename(key)
+        self._prune()
+        try:
+            f = file(filename, 'wb')
+            try:
+                dump(int(time() + timeout), f, 1)
+                dump(value, f, HIGHEST_PROTOCOL)
+            finally:
+                f.close()
+        except (IOError, OSError):
+            pass
+
+    def delete(self, key):
+        try:
+            os.remove(self._get_filename(key))
+        except (IOError, OSError):
+            pass
+
+    def clear(self):
+        for fname in os.listdir(self._path):
+            try:
+                os.remove(os.path.join(self._path, fname))
+            except (IOError, OSError):
+                pass
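
And a small, hedged sketch for the file system backend above (the
temporary directory is only an example location)::

    import tempfile
    from werkzeug.contrib.cache import FileSystemCache

    cache = FileSystemCache(tempfile.mkdtemp(), threshold=100,
                            default_timeout=3600)
    cache.set('answer', 42)
    print cache.get('answer')   # 42
    cache.clear()               # removes the cache files again
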
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/fixers.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.fixers
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    .. versionadded:: 0.5
+
+    This module includes various helpers that fix bugs in web servers.  They may
+    be necessary for some versions of a buggy web server but not others.  We try
+    to stay updated with the status of the bugs as well as possible, but you
+    have to check whether they fix the problem you encounter.
+
+    If you notice bugs in webservers not fixed in this module consider
+    contributing a patch.
+
+    :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from urllib import unquote
+from werkzeug.http import parse_options_header, parse_cache_control_header, \
+     parse_set_header, dump_header
+from werkzeug.useragents import UserAgent
+from werkzeug.datastructures import Headers, ResponseCacheControl
+
+
+class LighttpdCGIRootFix(object):
+    """Wrap the application in this middleware if you are using lighttpd
+    with FastCGI or CGI and the application is mounted on the URL root.
+
+    :param app: the WSGI application
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def __call__(self, environ, start_response):
+        environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
+                               environ.get('PATH_INFO', '')
+        environ['SCRIPT_NAME'] = ''
+        return self.app(environ, start_response)
+
+
+class PathInfoFromRequestUriFix(object):
+    """On windows environment variables are limited to the system charset
+    which makes it impossible to store the `PATH_INFO` variable in the
+    environment without loss of information on some systems.
+
+    This is for example a problem for CGI scripts on a Windows Apache.
+
+    This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
+    `REQUEST_URL`, or `UNENCODED_URL` (whatever is available).  Thus the
+    fix can only be applied if the webserver supports either of these
+    variables.
+
+    :param app: the WSGI application
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def __call__(self, environ, start_response):
+        for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
+            if key not in environ:
+                continue
+            request_uri = unquote(environ[key])
+            script_name = unquote(environ.get('SCRIPT_NAME', ''))
+            if request_uri.startswith(script_name):
+                environ['PATH_INFO'] = request_uri[len(script_name):] \
+                    .split('?', 1)[0]
+                break
+        return self.app(environ, start_response)
+
+
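
A hedged, self-contained illustration of what this fixer does; the tiny
WSGI app and the hand-built environ exist only for this example::

    from werkzeug.contrib.fixers import PathInfoFromRequestUriFix

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['PATH_INFO']]

    fixed = PathInfoFromRequestUriFix(app)
    environ = {'SCRIPT_NAME': '/myapp',
               'REQUEST_URI': '/myapp/some%20page?x=1'}
    print ''.join(fixed(environ, lambda *a: None))   # '/some page'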
+class ProxyFix(object):
+    """This middleware can be applied to add HTTP proxy support to an
+    application that was not designed with HTTP proxies in mind.  It
+    sets `REMOTE_ADDR` and `HTTP_HOST` from the `X-Forwarded-For` and
+    `X-Forwarded-Host` headers.
+
+    Werkzeug wrappers have builtin support for this by setting the
+    :attr:`~werkzeug.BaseRequest.is_behind_proxy` attribute to `True`.
+
+    Do not use this middleware in non-proxy setups for security reasons.
+
+    The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
+    the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
+    `werkzeug.proxy_fix.orig_http_host`.
+
+    :param app: the WSGI application
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def __call__(self, environ, start_response):
+        getter = environ.get
+        forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
+        forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
+        environ.update({
+            'werkzeug.proxy_fix.orig_remote_addr':  getter('REMOTE_ADDR'),
+            'werkzeug.proxy_fix.orig_http_host':    getter('HTTP_HOST')
+        })
+        if forwarded_for and forwarded_for[0].strip():
+            environ['REMOTE_ADDR'] = forwarded_for[0].strip()
+        if forwarded_host:
+            environ['HTTP_HOST'] = forwarded_host
+        return self.app(environ, start_response)
+
+
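
Again a hedged illustration with a toy application and a hand-built
environ, showing how the forwarded values replace the originals::

    from werkzeug.contrib.fixers import ProxyFix

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['REMOTE_ADDR']]

    proxied = ProxyFix(app)
    environ = {'REMOTE_ADDR': '127.0.0.1', 'HTTP_HOST': 'internal',
               'HTTP_X_FORWARDED_FOR': '1.2.3.4, 10.0.0.1',
               'HTTP_X_FORWARDED_HOST': 'example.com'}
    print ''.join(proxied(environ, lambda *a: None))   # '1.2.3.4'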
+class HeaderRewriterFix(object):
+    """This middleware can remove response headers and add others.  This
+    is useful, for example, to remove the `Date` header from responses if you
+    are using a server that adds that header no matter if it's already
+    present, or to add `X-Powered-By` headers::
+
+        app = HeaderRewriterFix(app, remove_headers=['Date'],
+                                add_headers=[('X-Powered-By', 'WSGI')])
+
+    :param app: the WSGI application
+    :param remove_headers: a sequence of header keys that should be
+                           removed.
+    :param add_headers: a sequence of ``(key, value)`` tuples that should
+                        be added.
+    """
+
+    def __init__(self, app, remove_headers=None, add_headers=None):
+        self.app = app
+        self.remove_headers = set(x.lower() for x in (remove_headers or ()))
+        self.add_headers = list(add_headers or ())
+
+    def __call__(self, environ, start_response):
+        def rewriting_start_response(status, headers, exc_info=None):
+            new_headers = []
+            for key, value in headers:
+                if key.lower() not in self.remove_headers:
+                    new_headers.append((key, value))
+            new_headers += self.add_headers
+            return start_response(status, new_headers, exc_info)
+        return self.app(environ, rewriting_start_response)
+
+
+class InternetExplorerFix(object):
+    """This middleware fixes a couple of bugs with Microsoft Internet
+    Explorer.  Currently the following fixes are applied:
+
+    -   removes `Vary` headers for unsupported mimetypes, which
+        cause trouble with caching.  Can be disabled by passing
+        ``fix_vary=False`` to the constructor.
+        See: http://support.microsoft.com/kb/824847/en-us
+
+    -   removes offending headers to work around caching bugs in
+        Internet Explorer if `Content-Disposition` is set.  Can be
+        disabled by passing ``fix_attach=False`` to the constructor.
+
+    If it does not detect affected Internet Explorer versions it won't touch
+    the request / response.
+    """
+
+    # This code was inspired by Django fixers for the same bugs.  The
+    # fix_vary and fix_attach fixers were originally implemented in Django
+    # by Michael Axiak and are available as part of the Django project:
+    #     http://code.djangoproject.com/ticket/4148
+
+    def __init__(self, app, fix_vary=True, fix_attach=True):
+        self.app = app
+        self.fix_vary = fix_vary
+        self.fix_attach = fix_attach
+
+    def fix_headers(self, environ, headers, status=None):
+        if self.fix_vary:
+            header = headers.get('content-type', '')
+            mimetype, options = parse_options_header(header)
+            if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
+                headers.pop('vary', None)
+
+        if self.fix_attach and 'content-disposition' in headers:
+            pragma = parse_set_header(headers.get('pragma', ''))
+            pragma.discard('no-cache')
+            header = pragma.to_header()
+            if not header:
+                headers.pop('pragma', '')
+            else:
+                headers['Pragma'] = header
+            header = headers.get('cache-control', '')
+            if header:
+                cc = parse_cache_control_header(header,
+                                                cls=ResponseCacheControl)
+                cc.no_cache = None
+                cc.no_store = False
+                header = cc.to_header()
+                if not header:
+                    headers.pop('cache-control', '')
+                else:
+                    headers['Cache-Control'] = header
+
+    def run_fixed(self, environ, start_response):
+        def fixing_start_response(status, headers, exc_info=None):
+            self.fix_headers(environ, Headers.linked(headers), status)
+            return start_response(status, headers, exc_info)
+        return self.app(environ, fixing_start_response)
+
+    def __call__(self, environ, start_response):
+        ua = UserAgent(environ)
+        if ua.browser != 'msie':
+            return self.app(environ, start_response)
+        return self.run_fixed(environ, start_response)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/iterio.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.contrib.iterio
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module implements an :class:`IterIO` that converts an iterator into
+    a stream object and the other way round.  Converting streams into
+    iterators requires the `greenlet`_ module.
+
+    To convert an iterator into a stream all you have to do is to pass it
+    directly to the :class:`IterIO` constructor.  In this example we pass it
+    a newly created generator::
+
+        def foo():
+            yield "something\n"
+            yield "otherthings"
+        stream = IterIO(foo())
+        print stream.read()         # read the whole iterator
+
+    The other way round works a bit differently because we have to ensure that
+    the code execution doesn't take place yet.  An :class:`IterIO` call with a
+    callable as first argument does two things.  The function itself is passed
+    an :class:`IterIO` stream it can feed.  The object returned by the
+    :class:`IterIO` constructor on the other hand is not a stream object but
+    an iterator::
+
+        def foo(stream):
+            stream.write("some")
+            stream.write("thing")
+            stream.flush()
+            stream.write("otherthing")
+        iterator = IterIO(foo)
+        print iterator.next()       # prints something
+        print iterator.next()       # prints otherthing
+        iterator.next()             # raises StopIteration
+
+    .. _greenlet: http://codespeak.net/py/dist/greenlet.html
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+try:
+    from py.magic import greenlet
+except:
+    greenlet = None
+
+
+class IterIO(object):
+    """Instances of this object implement an interface compatible with the
+    standard Python :class:`file` object.  Streams are either read-only or
+    write-only depending on how the object is created.
+    """
+
+    def __new__(cls, obj):
+        try:
+            iterator = iter(obj)
+        except TypeError:
+            return IterI(obj)
+        return IterO(iterator)
+
+    def __iter__(self):
+        return self
+
+    def tell(self):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        return self.pos
+
+    def isatty(self):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        return False
+
+    def seek(self, pos, mode=0):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def truncate(self, size=None):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def write(self, s):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def writelines(self, list):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def read(self, n=-1):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def readlines(self, sizehint=0):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def readline(self, length=None):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def flush(self):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        raise IOError(9, 'Bad file descriptor')
+
+    def next(self):
+        if self.closed:
+            raise StopIteration()
+        line = self.readline()
+        if not line:
+            raise StopIteration()
+        return line
+
+
+class IterI(IterIO):
+    """Convert an stream into an iterator."""
+
+    def __new__(cls, func):
+        if greenlet is None:
+            raise RuntimeError('IterI requires greenlet support')
+        stream = object.__new__(cls)
+        stream.__init__(greenlet.getcurrent())
+
+        def run():
+            func(stream)
+            stream.flush()
+
+        g = greenlet(run, stream._parent)
+        while 1:
+            rv = g.switch()
+            if not rv:
+                return
+            yield rv[0]
+
+    def __init__(self, parent):
+        self._parent = parent
+        self._buffer = []
+        self.closed = False
+        self.pos = 0
+
+    def close(self):
+        if not self.closed:
+            self.closed = True
+
+    def write(self, s):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        self.pos += len(s)
+        self._buffer.append(s)
+
+    def writelines(self, list):
+        self.write(''.join(list))
+
+    def flush(self):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        data = ''.join(self._buffer)
+        self._buffer = []
+        self._parent.switch((data,))
+
+
+class IterO(IterIO):
+    """Iter output.  Wrap an iterator and give it a stream like interface."""
+
+    def __new__(cls, gen):
+        return object.__new__(cls)
+
+    def __init__(self, gen):
+        self._gen = gen
+        self._buf = ''
+        self.closed = False
+        self.pos = 0
+
+    def __iter__(self):
+        return self
+
+    def close(self):
+        if not self.closed:
+            self.closed = True
+            if hasattr(self._gen, 'close'):
+                self._gen.close()
+
+    def seek(self, pos, mode=0):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        if mode == 1:
+            pos += self.pos
+        elif mode == 2:
+            self.read()
+            self.pos = min(self.pos, self.pos + pos)
+            return
+        elif mode != 0:
+            raise IOError('Invalid argument')
+        buf = []
+        try:
+            tmp_end_pos = len(self._buf)
+            while pos > tmp_end_pos:
+                item = self._gen.next()
+                tmp_end_pos += len(item)
+                buf.append(item)
+        except StopIteration:
+            pass
+        if buf:
+            self._buf += ''.join(buf)
+        self.pos = max(0, pos)
+
+    def read(self, n=-1):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        if n < 0:
+            self._buf += ''.join(self._gen)
+            result = self._buf[self.pos:]
+            self.pos += len(result)
+            return result
+        new_pos = self.pos + n
+        buf = []
+        try:
+            tmp_end_pos = len(self._buf)
+            while new_pos > tmp_end_pos:
+                item = self._gen.next()
+                tmp_end_pos += len(item)
+                buf.append(item)
+        except StopIteration:
+            pass
+        if buf:
+            self._buf += ''.join(buf)
+        new_pos = max(0, new_pos)
+        try:
+            return self._buf[self.pos:new_pos]
+        finally:
+            self.pos = min(new_pos, len(self._buf))
+
+    def readline(self, length=None):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        nl_pos = self._buf.find('\n', self.pos)
+        buf = []
+        try:
+            pos = self.pos
+            while nl_pos < 0:
+                item = self._gen.next()
+                local_pos = item.find('\n')
+                buf.append(item)
+                if local_pos >= 0:
+                    nl_pos = pos + local_pos
+                    break
+                pos += len(item)
+        except StopIteration:
+            pass
+        if buf:
+            self._buf += ''.join(buf)
+        if nl_pos < 0:
+            new_pos = len(self._buf)
+        else:
+            new_pos = nl_pos + 1
+        if length is not None and self.pos + length < new_pos:
+            new_pos = self.pos + length
+        try:
+            return self._buf[self.pos:new_pos]
+        finally:
+            self.pos = min(new_pos, len(self._buf))
+
+    def readlines(self, sizehint=0):
+        total = 0
+        lines = []
+        line = self.readline()
+        while line:
+            lines.append(line)
+            total += len(line)
+            if 0 < sizehint <= total:
+                break
+            line = self.readline()
+        return lines
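
A short, hedged example of the read-only direction implemented by IterO
above (no greenlet required); the generator is made up::

    from werkzeug.contrib.iterio import IterIO

    def producer():
        yield 'first line\n'
        yield 'second '
        yield 'line\n'

    stream = IterIO(producer())
    print repr(stream.readline())   # 'first line\n'
    print repr(stream.read())       # 'second line\n'
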
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/jsrouting.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.jsrouting
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Addon module that allows creating a JavaScript function from a
+    routing map that builds URLs from the map's rules.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+try:
+    from simplejson import dumps
+except ImportError:
+    def dumps(*args):
+        raise RuntimeError('simplejson required for jsrouting')
+
+from inspect import getmro
+from werkzeug.templates import Template
+from werkzeug.routing import NumberConverter
+
+
+_javascript_routing_template = Template(u'''\
+<% if name_parts %>\
+<% for idx in xrange(0, len(name_parts) - 1) %>\
+if (typeof ${'.'.join(name_parts[:idx + 1])} === 'undefined') \
+${'.'.join(name_parts[:idx + 1])} = {};
+<% endfor %>\
+${'.'.join(name_parts)} = <% endif %>\
+(function (server_name, script_name, subdomain, url_scheme) {
+    var converters = ${', '.join(converters)};
+    var rules = $rules;
+    function in_array(array, value) {
+        if (array.indexOf != undefined) {
+            return array.indexOf(value) != -1;
+        }
+        for (var i = 0; i < array.length; i++) {
+            if (array[i] == value) {
+                return true;
+            }
+        }
+        return false;
+    }
+    function array_diff(array1, array2) {
+        array1 = array1.slice();
+        for (var i = array1.length-1; i >= 0; i--) {
+            if (in_array(array2, array1[i])) {
+                array1.splice(i, 1);
+            }
+        }
+        return array1;
+    }
+    function split_obj(obj) {
+        var names = [];
+        var values = [];
+        for (var name in obj) {
+            if (typeof(obj[name]) != 'function') {
+                names.push(name);
+                values.push(obj[name]);
+            }
+        }
+        return {names: names, values: values, original: obj};
+    }
+    function suitable(rule, args) {
+        var default_args = split_obj(rule.defaults || {});
+        var diff_arg_names = array_diff(rule.arguments, default_args.names);
+
+        for (var i = 0; i < diff_arg_names.length; i++) {
+            if (!in_array(args.names, diff_arg_names[i])) {
+                return false;
+            }
+        }
+
+        if (array_diff(rule.arguments, args.names).length == 0) {
+            if (rule.defaults == null) {
+                return true;
+            }
+            for (var i = 0; i < default_args.names.length; i++) {
+                var key = default_args.names[i];
+                var value = default_args.values[i];
+                if (value != args.original[key]) {
+                    return false;
+                }
+            }
+        }
+
+        return true;
+    }
+    function build(rule, args) {
+        var tmp = [];
+        var processed = rule.arguments.slice();
+        for (var i = 0; i < rule.trace.length; i++) {
+            var part = rule.trace[i];
+            if (part.is_dynamic) {
+                var converter = converters[rule.converters[part.data]];
+                var data = converter(args.original[part.data]);
+                if (data == null) {
+                    return null;
+                }
+                tmp.push(data);
+                processed.push(part.name);
+            } else {
+                tmp.push(part.data);
+            }
+        }
+        tmp = tmp.join('');
+        var pipe = tmp.indexOf('|');
+        var subdomain = tmp.substring(0, pipe);
+        var url = tmp.substring(pipe+1);
+
+        var unprocessed = array_diff(args.names, processed);
+        var first_query_var = true;
+        for (var i = 0; i < unprocessed.length; i++) {
+            if (first_query_var) {
+                url += '?';
+            } else {
+                url += '&';
+            }
+            first_query_var = false;
+            url += encodeURIComponent(unprocessed[i]);
+            url += '=';
+            url += encodeURIComponent(args.original[unprocessed[i]]);
+        }
+        return {subdomain: subdomain, path: url};
+    }
+    function lstrip(s, c) {
+        while (s && s.substring(0, 1) == c) {
+            s = s.substring(1);
+        }
+        return s;
+    }
+    function rstrip(s, c) {
+        while (s && s.substring(s.length-1, s.length) == c) {
+            s = s.substring(0, s.length-1);
+        }
+        return s;
+    }
+    return function(endpoint, args, force_external) {
+        args = split_obj(args);
+        var rv = null;
+        for (var i = 0; i < rules.length; i++) {
+            var rule = rules[i];
+            if (rule.endpoint != endpoint) continue;
+            if (suitable(rule, args)) {
+                rv = build(rule, args);
+                if (rv != null) {
+                    break;
+                }
+            }
+        }
+        if (rv == null) {
+            return null;
+        }
+        if (!force_external && rv.subdomain == subdomain) {
+            return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
+        } else {
+            return url_scheme + '://'
+                   + (rv.subdomain ? rv.subdomain + '.' : '')
+                   + server_name + rstrip(script_name, '/')
+                   + '/' + lstrip(rv.path, '/');
+        }
+    };
+})''')
+
+
+def generate_map(map, name='url_map'):
+    """
+    Generates a JavaScript function containing the rules defined in
+    this map, to be used with a MapAdapter's generate_javascript
+    method.  If you don't pass a name the returned JavaScript code is
+    an expression that returns a function.  Otherwise it's a standalone
+    script that assigns the function with that name.  Dotted names are
+    resolved (so you can use a name like 'obj.url_for').
+
+    In order to use JavaScript generation, simplejson must be installed.
+
+    Note that using this feature will expose the rules
+    defined in your map to users. If your rules contain sensitive
+    information, don't use JavaScript generation!
+    """
+    map.update()
+    rules = []
+    converters = []
+    for rule in map.iter_rules():
+        trace = [{
+            'is_dynamic':   is_dynamic,
+            'data':         data
+        } for is_dynamic, data in rule._trace]
+        rule_converters = {}
+        for key, converter in rule._converters.iteritems():
+            js_func = js_to_url_function(converter)
+            try:
+                index = converters.index(js_func)
+            except ValueError:
+                converters.append(js_func)
+                index = len(converters) - 1
+            rule_converters[key] = index
+        rules.append({
+            u'endpoint':    rule.endpoint,
+            u'arguments':   list(rule.arguments),
+            u'converters':  rule_converters,
+            u'trace':       trace,
+            u'defaults':    rule.defaults
+        })
+
+    return _javascript_routing_template.render({
+        'name_parts':   name and name.split('.') or [],
+        'rules':        dumps(rules),
+        'converters':   converters
+    })
+
+
+def generate_adapter(adapter, name='url_for', map_name='url_map'):
+    """Generates the url building function for a map."""
+    values = {
+        u'server_name':     dumps(adapter.server_name),
+        u'script_name':     dumps(adapter.script_name),
+        u'subdomain':       dumps(adapter.subdomain),
+        u'url_scheme':      dumps(adapter.url_scheme),
+        u'name':            name,
+        u'map_name':        map_name
+    }
+    return u'''\
+var %(name)s = %(map_name)s(
+    %(server_name)s,
+    %(script_name)s,
+    %(subdomain)s,
+    %(url_scheme)s
+);''' % values
+
+
+def js_to_url_function(converter):
+    """Get the JavaScript converter function from a rule."""
+    if hasattr(converter, 'js_to_url_function'):
+        data = converter.js_to_url_function()
+    else:
+        for cls in getmro(type(converter)):
+            if cls in js_to_url_functions:
+                data = js_to_url_functions[cls](converter)
+                break
+        else:
+            return 'encodeURIComponent'
+    return '(function(value) { %s })' % data
+
+
+def NumberConverter_js_to_url(conv):
+    if conv.fixed_digits:
+        return u'''\
+var result = value.toString();
+while (result.length < %s)
+    result = '0' + result;
+return result;''' % conv.fixed_digits
+    return u'return value.toString();'
+
+
+js_to_url_functions = {
+    NumberConverter:    NumberConverter_js_to_url
+}
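
A hedged sketch of how this module is typically driven (it assumes
`simplejson` is installed, as noted above; the rules are made up)::

    from werkzeug.routing import Map, Rule
    from werkzeug.contrib.jsrouting import generate_map

    url_map = Map([Rule('/', endpoint='index'),
                   Rule('/page/<int:page_id>', endpoint='page')])
    # prints JavaScript source that assigns the URL builder to `url_map`
    print generate_map(url_map, name='url_map')
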
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/kickstart.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,284 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.kickstart
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides some simple shortcuts to make using Werkzeug simpler
+    for small scripts.
+
+    These improvements include predefined `Request` and `Response` objects as
+    well as a predefined `Application` object which can be customized in child
+    classes, of course.  The `Request` and `Response` objects handle URL
+    generation as well as sessions via `werkzeug.contrib.sessions` and are
+    purely optional.
+
+    There is also some integration of template engines.  The template loaders
+    are, of course, not necessary to use the template engines in Werkzeug,
+    but they provide a common interface.  Currently supported template engines
+    include Werkzeug's minitmpl and Genshi_.  Support for other engines can be
+    added in a trivial way.  These loaders provide a template interface
+    similar to the one used by Django_.
+
+    .. _Genshi: http://genshi.edgewall.org/
+    .. _Django: http://www.djangoproject.com/
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from os import path
+from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
+from werkzeug.templates import Template
+from werkzeug.exceptions import HTTPException
+from werkzeug.routing import RequestRedirect
+
+__all__ = ['Request', 'Response', 'TemplateNotFound', 'TemplateLoader',
+           'GenshiTemplateLoader', 'Application']
+
+
+class Request(RequestBase):
+    """A handy subclass of the base request that adds a URL builder.
+    When supplied with a session store, it is also able to handle sessions.
+    """
+
+    def __init__(self, environ, url_map,
+            session_store=None, cookie_name=None):
+        # call the parent for initialization
+        RequestBase.__init__(self, environ)
+        # create an adapter
+        self.url_adapter = url_map.bind_to_environ(environ)
+        # create all stuff for sessions
+        self.session_store = session_store
+        self.cookie_name = cookie_name
+
+        if session_store is not None and cookie_name is not None:
+            if cookie_name in self.cookies:
+                # get the session out of the storage
+                self.session = session_store.get(self.cookies[cookie_name])
+            else:
+                # create a new session
+                self.session = session_store.new()
+
+    def url_for(self, callback, **values):
+        return self.url_adapter.build(callback, values)
+
+
+class Response(ResponseBase):
+    """
+    A subclass of base response which sets the default mimetype to text/html.
+    If the `Request` that came in is using Werkzeug sessions, this class
+    takes care of saving that session.
+    """
+    default_mimetype = 'text/html'
+
+    def __call__(self, environ, start_response):
+        # get the request object
+        request = environ['werkzeug.request']
+
+        if request.session_store is not None:
+            # save the session if necessary
+            request.session_store.save_if_modified(request.session)
+
+            # set the cookie for the browser if it is not there:
+            if request.cookie_name not in request.cookies:
+                self.set_cookie(request.cookie_name, request.session.sid)
+
+        # go on with normal response business
+        return ResponseBase.__call__(self, environ, start_response)
+
+
+class Processor(object):
+    """A request and response processor - it is what Django calls a
+    middleware, but Werkzeug also includes straightforward support for real
+    WSGI middlewares, so another name was chosen.
+
+    The code of this processor is derived from the example in the Werkzeug
+    trac, called `Request and Response Processor
+    <http://dev.pocoo.org/projects/werkzeug/wiki/RequestResponseProcessor>`_
+    """
+
+    def process_request(self, request):
+        return request
+
+    def process_response(self, request, response):
+        return response
+
+    def process_view(self, request, view_func, view_args, view_kwargs):
+        """process_view() is called just before the Application calls the
+        function specified by view_func.
+
+        If this returns None, the Application processes the next Processor,
+        and if it returns something else (like a Response instance), that
+        will be returned without any further processing.
+        """
+        return None
+
+    def process_exception(self, request, exception):
+        return None
+
+
+class Application(object):
+    """A generic WSGI application which can be used to start with Werkzeug in
+    an easy, straightforward way.
+    """
+
+    def __init__(self, name, url_map, session=False, processors=None):
+        # save the name and the URL-map, as it'll be needed later on
+        self.name = name
+        self.url_map = url_map
+        # save the list of processors if supplied
+        self.processors = processors or []
+        # save the session store if one was supplied
+        if session:
+            self.store = session
+        else:
+            self.store = None
+
+    def __call__(self, environ, start_response):
+        # create a request - with or without session support
+        if self.store is not None:
+            request = Request(environ, self.url_map,
+                session_store=self.store, cookie_name='%s_sid' % self.name)
+        else:
+            request = Request(environ, self.url_map)
+
+        # apply the request processors
+        for processor in self.processors:
+            request = processor.process_request(request)
+
+        try:
+            # find the callback to which the URL is mapped
+            callback, args = request.url_adapter.match(request.path)
+        except (HTTPException, RequestRedirect), e:
+            response = e
+        else:
+            # check all view processors
+            for processor in self.processors:
+                action = processor.process_view(request, callback, (), args)
+                if action is not None:
+                    # it is overriding the default behaviour, this is
+                    # short-circuiting the processing, so it returns here
+                    return action(environ, start_response)
+
+            try:
+                response = callback(request, **args)
+            except Exception, exception:
+                # the callback raised some exception, need to process that
+                for processor in reversed(self.processors):
+                    # filter it through the exception processor
+                    action = processor.process_exception(request, exception)
+                    if action is not None:
+                        # the exception processor returned some action
+                        return action(environ, start_response)
+                # still not handled by an exception processor, so re-raise
+                raise
+
+        # apply the response processors
+        for processor in reversed(self.processors):
+            response = processor.process_response(request, response)
+
+        # return the completely processed response
+        return response(environ, start_response)
+
+
+    def config_session(self, store, expiration='session'):
+        """
+        Configures the settings for cookies. You can also disable cookies by
+        setting store to None.
+        """
+        self.store = store
+        # expiration=session is the default anyway
+        # TODO: add settings to define the expiration date, the domain, the
+        # path and maybe the secure parameter.
+
+
+class TemplateNotFound(IOError, LookupError):
+    """
+    A template was not found by the template loader.
+    """
+
+    def __init__(self, name):
+        IOError.__init__(self, name)
+        self.name = name
+
+
+class TemplateLoader(object):
+    """
+    A simple loader interface for the werkzeug minitmpl
+    template language.
+    """
+
+    def __init__(self, search_path, encoding='utf-8'):
+        self.search_path = path.abspath(search_path)
+        self.encoding = encoding
+
+    def get_template(self, name):
+        """Get a template from a given name."""
+        filename = path.join(self.search_path, *[p for p in name.split('/')
+                                                 if p and p[0] != '.'])
+        if not path.exists(filename):
+            raise TemplateNotFound(name)
+        return Template.from_file(filename, self.encoding)
+
+    def render_to_response(self, *args, **kwargs):
+        """Load and render a template into a response object."""
+        return Response(self.render_to_string(*args, **kwargs))
+
+    def render_to_string(self, *args, **kwargs):
+        """Load and render a template into a unicode string."""
+        try:
+            template_name, args = args[0], args[1:]
+        except IndexError:
+            raise TypeError('name of template required')
+        return self.get_template(template_name).render(*args, **kwargs)
+
+
+class GenshiTemplateLoader(TemplateLoader):
+    """A unified interface for loading Genshi templates. Actually a quite thin
+    wrapper for Genshi's TemplateLoader.
+
+    It sets some defaults that differ from the Genshi loader, most notably
+    auto_reload is active. All important options can be passed through to
+    Genshi.
+    The default output type is 'html', but can be adjusted easily by changing
+    the `output_type` attribute.
+    """
+    def __init__(self, search_path, encoding='utf-8', **kwargs):
+        TemplateLoader.__init__(self, search_path, encoding)
+        # import Genshi here, because we don't want a general Genshi
+        # dependency, only a local one
+        from genshi.template import TemplateLoader as GenshiLoader
+        from genshi.template.loader import TemplateNotFound
+
+        self.not_found_exception = TemplateNotFound
+        # set auto_reload to True per default
+        reload_template = kwargs.pop('auto_reload', True)
+        # get rid of default_encoding as this template loader overwrites it
+        # with the value of encoding
+        kwargs.pop('default_encoding', None)
+
+        # now, all arguments are clean, pass them on
+        self.loader = GenshiLoader(search_path, default_encoding=encoding,
+                auto_reload=reload_template, **kwargs)
+
+        # the default output is HTML but can be overridden easily
+        self.output_type = 'html'
+        self.encoding = encoding
+
+    def get_template(self, template_name):
+        """Get the template which is at the given name"""
+        try:
+            return self.loader.load(template_name, encoding=self.encoding)
+        except self.not_found_exception, e:
+            # catch the exception raised by Genshi, convert it into a werkzeug
+            # exception (for the sake of consistency)
+            raise TemplateNotFound(template_name)
+
+    def render_to_string(self, template_name, context=None):
+        """Load and render a template into an unicode string"""
+        # create an empty context if no context was specified
+        context = context or {}
+        tmpl = self.get_template(template_name)
+        # render the template into a unicode string (None means unicode)
+        return tmpl.generate(**context).render(self.output_type,
+                                               encoding=None)
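
A minimal sketch of wiring the classes above together; the view function and rule are illustrative, not part of the bundled module::

    from werkzeug.routing import Map, Rule
    from werkzeug.contrib.kickstart import Application, Response

    def index(request):
        # endpoints are plain callables; matched URL values arrive as
        # keyword arguments, and url_for() builds URLs from the callable
        return Response('Hello from %s' % request.url_for(index))

    url_map = Map([Rule('/', endpoint=index)])
    app = Application('myapp', url_map)

    if __name__ == '__main__':
        from werkzeug.serving import run_simple
        run_simple('localhost', 5000, app)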
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/limiter.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.limiter
+    ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    A middleware that limits incoming data.  This works around problems with
+    Trac_ or Django_ because those read the incoming data directly into memory.
+
+    .. _Trac: http://trac.edgewall.org/
+    .. _Django: http://www.djangoproject.com/
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from warnings import warn
+
+from werkzeug import LimitedStream
+
+
+class StreamLimitMiddleware(object):
+    """Limits the input stream to a given number of bytes.  This is useful if
+    you have a WSGI application that reads form data into memory (django for
+    example) and you don't want users to harm the server by uploading tons of
+    data.
+
+    The default maximum size is 10MB.
+    """
+
+    def __init__(self, app, maximum_size=1024 * 1024 * 10):
+        self.app = app
+        self.maximum_size = maximum_size
+
+    def __call__(self, environ, start_response):
+        limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
+        environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
+        return self.app(environ, start_response)
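
A usage sketch; the WSGI application and the 4MB limit below are illustrative::

    from werkzeug import Response
    from werkzeug.contrib.limiter import StreamLimitMiddleware

    def application(environ, start_response):
        # read() stops at the limit imposed by the wrapped LimitedStream
        body = environ['wsgi.input'].read()
        return Response('got %d bytes' % len(body))(environ, start_response)

    # cap request bodies at 4MB instead of the default 10MB
    application = StreamLimitMiddleware(application,
                                        maximum_size=4 * 1024 * 1024)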
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/lint.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.lint
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    .. versionadded:: 0.5
+
+    This module provides a middleware that performs sanity checks of the WSGI
+    application.  It checks that :pep:`333` is properly implemented and warns
+    on some common HTTP errors such as non-empty responses for 304 status
+    codes.
+
+    This module provides a middleware, the :class:`LintMiddleware`.  Wrap your
+    application with it and it will warn about common problems with WSGI and
+    HTTP while your application is running.
+
+    It's strongly recommended to use it during development.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from urlparse import urlparse
+from warnings import warn
+
+from werkzeug import Headers, FileWrapper, is_entity_header
+
+
+class WSGIWarning(Warning):
+    """Warning class for WSGI warnings."""
+
+
+class HTTPWarning(Warning):
+    """Warning class for HTTP warnings."""
+
+
+def check_string(context, obj, stacklevel=3):
+    if type(obj) is not str:
+        warn(WSGIWarning('%s requires bytestrings, got %s' %
+            (context, obj.__class__.__name__)), stacklevel=stacklevel)
+
+
+class InputStream(object):
+
+    def __init__(self, stream):
+        self._stream = stream
+
+    def read(self, *args):
+        if len(args) == 0:
+            warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
+                             'input stream, thus making calls to '
+                             'wsgi.input.read() unsafe.  Conforming servers '
+                             'may never return from this call.'),
+                 stacklevel=2)
+        elif len(args) != 1:
+            warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
+                 stacklevel=2)
+        return self._stream.read(*args)
+
+    def readline(self, *args):
+        if len(args) == 0:
+            warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
+                             ' are unsafe.  Use wsgi.input.read() instead.'),
+                 stacklevel=2)
+        elif len(args) == 1:
+            warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
+                             'WSGI does not support this, although it\'s available '
+                             'on all major servers.'),
+                 stacklevel=2)
+        else:
+            raise TypeError('too many arguments passed to wsgi.input.readline()')
+        return self._stream.readline(*args)
+
+    def __iter__(self):
+        try:
+            return iter(self._stream)
+        except TypeError:
+            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
+            return iter(())
+
+    def close(self):
+        warn(WSGIWarning('application closed the input stream!'),
+             stacklevel=2)
+        self._stream.close()
+
+
+class ErrorStream(object):
+
+    def __init__(self, stream):
+        self._stream = stream
+
+    def write(self, s):
+        check_string('wsgi.error.write()', s)
+        self._stream.write(s)
+
+    def flush(self):
+        self._stream.flush()
+
+    def writelines(self, seq):
+        for line in seq:
+            self.write(line)
+
+    def close(self):
+        warn(WSGIWarning('application closed the error stream!'),
+             stacklevel=2)
+        self._stream.close()
+
+
+class GuardedWrite(object):
+
+    def __init__(self, write, chunks):
+        self._write = write
+        self._chunks = chunks
+
+    def __call__(self, s):
+        check_string('write()', s)
+        self._write(s)
+        self._chunks.append(len(s))
+
+
+class GuardedIterator(object):
+
+    def __init__(self, iterator, headers_set, chunks):
+        self._iterator = iterator
+        self._next = iter(iterator).next
+        self.closed = False
+        self.headers_set = headers_set
+        self.chunks = chunks
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        if self.closed:
+            warn(WSGIWarning('iterated over closed app_iter'),
+                 stacklevel=2)
+        rv = self._next()
+        if not self.headers_set:
+            warn(WSGIWarning('Application returned before it '
+                             'started the response'), stacklevel=2)
+        check_string('application iterator items', rv)
+        self.chunks.append(len(rv))
+        return rv
+
+    def close(self):
+        self.closed = True
+        if hasattr(self._iterator, 'close'):
+            self._iterator.close()
+
+        if self.headers_set:
+            status_code, headers = self.headers_set
+            bytes_sent = sum(self.chunks)
+            content_length = headers.get('content-length', type=int)
+
+            if status_code == 304:
+                for key, value in headers:
+                    key = key.lower()
+                    if key not in ('expires', 'content-location') and \
+                       is_entity_header(key):
+                        warn(HTTPWarning('entity header %r found in 304 '
+                            'response' % key))
+                if bytes_sent:
+                    warn(HTTPWarning('304 responses must not have a body'))
+            elif 100 <= status_code < 200 or status_code == 204:
+                if content_length != 0:
+                    warn(HTTPWarning('%r responses must have an empty '
+                                     'content length' % status_code))
+                if bytes_sent:
+                    warn(HTTPWarning('%r responses must not have a body' %
+                                     status_code))
+            elif content_length is not None and content_length != bytes_sent:
+                warn(WSGIWarning('Content-Length and the number of bytes '
+                                 'sent to the client do not match.'))
+
+    def __del__(self):
+        if not self.closed:
+            try:
+                warn(WSGIWarning('Iterator was garbage collected before '
+                                 'it was closed.'))
+            except:
+                pass
+
+
+class LintMiddleware(object):
+    """This middleware wraps an application and warns on common errors.
+    Among other things it currently checks for the following problems:
+
+    -   invalid status codes
+    -   non-bytestrings sent to the WSGI server
+    -   strings returned from the WSGI application
+    -   non-empty conditional responses
+    -   unquoted etags
+    -   relative URLs in the Location header
+    -   unsafe calls to wsgi.input
+    -   unclosed iterators
+
+    Detected errors are emitted using the standard Python :mod:`warnings`
+    system and usually end up on :data:`stderr`.
+
+    ::
+
+        from werkzeug.contrib.lint import LintMiddleware
+        app = LintMiddleware(app)
+
+    :param app: the application to wrap
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def check_environ(self, environ):
+        if type(environ) is not dict:
+            warn(WSGIWarning('WSGI environment is not a standard python dict.'),
+                 stacklevel=4)
+        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
+                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
+                    'wsgi.multithread', 'wsgi.multiprocess',
+                    'wsgi.run_once'):
+            if key not in environ:
+                warn(WSGIWarning('required environment key %r not found'
+                     % key), stacklevel=3)
+        if environ['wsgi.version'] != (1, 0):
+            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
+                 stacklevel=3)
+
+        script_name = environ.get('SCRIPT_NAME', '')
+        if script_name and script_name[:1] != '/':
+            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
+                             % script_name), stacklevel=3)
+        path_info = environ.get('PATH_INFO', '')
+        if path_info[:1] != '/':
+            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
+                             % path_info), stacklevel=3)
+
+
+    def check_start_response(self, status, headers, exc_info):
+        check_string('status', status)
+        status_code = status.split(None, 1)[0]
+        if len(status_code) != 3 or not status_code.isdigit():
+            warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
+        if len(status) < 4 or status[3] != ' ':
+            warn(WSGIWarning('Invalid value for status %r.  Valid '
+                             'status strings are three digits, a space '
+                             'and a status explanation' % status), stacklevel=3)
+        status_code = int(status_code)
+        if status_code < 100:
+            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
+
+        if type(headers) is not list:
+            warn(WSGIWarning('header list is not a list'), stacklevel=3)
+        for item in headers:
+            if type(item) is not tuple or len(item) != 2:
+                warn(WSGIWarning('Header items must be 2-item tuples'),
+                     stacklevel=3)
+            name, value = item
+            if type(name) is not str or type(value) is not str:
+                warn(WSGIWarning('header items must be strings'),
+                     stacklevel=3)
+            if name.lower() == 'status':
+                warn(WSGIWarning('The status header is not supported due to '
+                                 'conflicts with the CGI spec.'),
+                                 stacklevel=3)
+
+        if exc_info is not None and not isinstance(exc_info, tuple):
+            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
+
+        headers = Headers(headers)
+        self.check_headers(headers)
+
+        return status_code, headers
+
+    def check_headers(self, headers):
+        etag = headers.get('etag')
+        if etag is not None:
+            if etag.startswith('w/') or etag.startswith('W/'):
+                etag = etag[2:]
+            if not (etag[:1] == etag[-1:] == '"'):
+                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
+
+        location = headers.get('location')
+        if location is not None:
+            if not urlparse(location).netloc:
+                warn(HTTPWarning('absolute URLs required for location header'),
+                     stacklevel=4)
+
+    def check_iterator(self, app_iter):
+        if isinstance(app_iter, basestring):
+            warn(WSGIWarning('application returned a string.  The response '
+                             'will be sent to the client character by '
+                             'character, which hurts performance.  Return '
+                             'a list or iterable instead.'), stacklevel=3)
+
+    def __call__(self, *args, **kwargs):
+        if len(args) != 2:
+            warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
+        if kwargs:
+            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
+                 stacklevel=2)
+        environ, start_response = args
+
+        self.check_environ(environ)
+        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
+        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
+
+        # hook our own file wrapper in so that applications will always
+        # iterate to the end and we can check the content length
+        environ['wsgi.file_wrapper'] = FileWrapper
+
+        headers_set = []
+        chunks = []
+
+        def checking_start_response(*args, **kwargs):
+            if len(args) not in (2, 3):
+                warn(WSGIWarning('Invalid number of arguments: %s, expected '
+                     '2 or 3' % len(args)), stacklevel=2)
+            if kwargs:
+                warn(WSGIWarning('no keyword arguments allowed.'))
+
+            status, headers = args[:2]
+            if len(args) == 3:
+                exc_info = args[2]
+            else:
+                exc_info = None
+
+            headers_set[:] = self.check_start_response(status, headers,
+                                                       exc_info)
+            return GuardedWrite(start_response(status, headers, exc_info),
+                                chunks)
+
+        app_iter = self.app(environ, checking_start_response)
+        self.check_iterator(app_iter)
+        return GuardedIterator(app_iter, headers_set, chunks)
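
A development-time usage sketch; the application below is illustrative::

    from werkzeug import Response
    from werkzeug.contrib.lint import LintMiddleware
    from werkzeug.serving import run_simple

    def application(environ, start_response):
        return Response('Hello World!')(environ, start_response)

    if __name__ == '__main__':
        # warnings raised by the linter go through the warnings module
        # and normally end up on stderr
        run_simple('localhost', 5000, LintMiddleware(application))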
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/profiler.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.profiler
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides a simple WSGI profiler middleware for finding
+    bottlenecks in web applications.  It uses the :mod:`profile` or
+    :mod:`cProfile` module to do the profiling and writes the stats to the
+    stream provided (defaults to stdout).
+
+    Example usage::
+
+        from werkzeug.contrib.profiler import ProfilerMiddleware
+        app = ProfilerMiddleware(app)
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+try:
+    try:
+        from cProfile import Profile
+    except ImportError:
+        from profile import Profile
+    from pstats import Stats
+    available = True
+except ImportError:
+    available = False
+
+
+class MergeStream(object):
+    """An object that redirects `write` calls to multiple streams.
+    Use this to log to both `sys.stdout` and a file::
+
+        f = open('profiler.log', 'w')
+        stream = MergeStream(sys.stdout, f)
+        profiler = ProfilerMiddleware(app, stream)
+    """
+
+    def __init__(self, *streams):
+        if not streams:
+            raise TypeError('at least one stream must be given')
+        self.streams = streams
+
+    def write(self, data):
+        for stream in self.streams:
+            stream.write(data)
+
+
+class ProfilerMiddleware(object):
+    """Simple profiler middleware.  Wraps a WSGI application and profiles
+    a request.  This intentionally buffers the response so that timings are
+    more exact.
+
+    For the exact meaning of `sort_by` and `restrictions` consult the
+    :mod:`profile` documentation.
+
+    :param app: the WSGI application to profile.
+    :param stream: the stream for the profiled stats.  Defaults to stdout.
+    :param sort_by: a tuple of columns to sort the result by.
+    :param restrictions: a tuple of profiling restrictions.
+    """
+
+    def __init__(self, app, stream=None,
+                 sort_by=('time', 'calls'), restrictions=()):
+        if not available:
+            raise RuntimeError('the profiler is not available because '
+                               'profile or pstats is not installed.')
+        self._app = app
+        self._stream = stream or sys.stdout
+        self._sort_by = sort_by
+        self._restrictions = restrictions
+
+    def __call__(self, environ, start_response):
+        response_body = []
+
+        def catching_start_response(status, headers, exc_info=None):
+            start_response(status, headers, exc_info)
+            return response_body.append
+
+        def runapp():
+            appiter = self._app(environ, catching_start_response)
+            response_body.extend(appiter)
+            if hasattr(appiter, 'close'):
+                appiter.close()
+
+        p = Profile()
+        p.runcall(runapp)
+        body = ''.join(response_body)
+        stats = Stats(p, stream=self._stream)
+        stats.sort_stats(*self._sort_by)
+
+        self._stream.write('-' * 80)
+        self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
+        stats.print_stats(*self._restrictions)
+        self._stream.write('-' * 80 + '\n\n')
+
+        return [body]
+
+
+def make_action(app_factory, hostname='localhost', port=5000,
+                threaded=False, processes=1, stream=None,
+                sort_by=('time', 'calls'), restrictions=()):
+    """Return a new callback for :mod:`werkzeug.script` that starts a local
+    server with the profiler enabled::
+
+        from werkzeug.contrib import profiler
+        action_profile = profiler.make_action(make_app)
+    """
+    def action(hostname=('h', hostname), port=('p', port),
+               threaded=threaded, processes=processes):
+        """Start a new development server."""
+        from werkzeug.serving import run_simple
+        app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
+        run_simple(hostname, port, app, False, None, threaded, processes)
+    return action
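
A sketch combining `MergeStream` and `ProfilerMiddleware`; the application, log file name and sort columns below are illustrative::

    import sys
    from werkzeug import Response
    from werkzeug.contrib.profiler import MergeStream, ProfilerMiddleware

    def application(environ, start_response):
        return Response('profiled')(environ, start_response)

    # write the profiling output to the console and to a log file
    stream = MergeStream(sys.stdout, open('profiler.log', 'w'))
    application = ProfilerMiddleware(application, stream,
                                     sort_by=('cumulative', 'calls'))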
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/securecookie.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,328 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.contrib.securecookie
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module implements a cookie that is not alterable from the client
+    because it adds a checksum the server checks for.  You can use it as a
+    session replacement if all you have is a user id or something to mark
+    a logged-in user.
+
+    Keep in mind that the data is still readable from the client as a
+    normal cookie is.  However, you don't have to store and flush the
+    sessions you have on the server.
+
+    Example usage:
+
+    >>> from werkzeug.contrib.securecookie import SecureCookie
+    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+
+    Dumping into a string so that one can store it in a cookie:
+
+    >>> value = x.serialize()
+
+    Loading from that string again:
+
+    >>> x = SecureCookie.unserialize(value, "deadbeef")
+    >>> x["baz"]
+    (1, 2, 3)
+
+    If someone modifies the cookie and the checksum is wrong the unserialize
+    method will fail silently and return a new empty `SecureCookie` object.
+
+    Keep in mind that the values will be visible in the cookie so do not
+    store data in a cookie you don't want the user to see.
+
+    Application Integration
+    =======================
+
+    If you are using the werkzeug request objects you could integrate the
+    secure cookie into your application like this::
+
+        from werkzeug import BaseRequest, cached_property
+        from werkzeug.contrib.securecookie import SecureCookie
+
+        # don't use this key but a different one; you could just use
+        # os.urandom(20) to get something random
+        SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
+
+        class Request(BaseRequest):
+
+            @cached_property
+            def client_session(self):
+                data = self.cookies.get('session_data')
+                if not data:
+                    return SecureCookie(secret_key=SECRET_KEY)
+                return SecureCookie.unserialize(data, SECRET_KEY)
+
+        def application(environ, start_response):
+            request = Request(environ)
+
+            # get a response object here
+            response = ...
+
+            if request.client_session.should_save:
+                session_data = request.client_session.serialize()
+                response.set_cookie('session_data', session_data,
+                                    httponly=True)
+            return response(environ, start_response)
+
+    A less verbose integration can be achieved by using shorthand methods::
+
+        class Request(BaseRequest):
+
+            @cached_property
+            def client_session(self):
+                return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
+
+        def application(environ, start_response):
+            request = Request(environ)
+
+            # get a response object here
+            response = ...
+
+            request.client_session.save_cookie(response)
+            return response(environ, start_response)
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+import cPickle as pickle
+from hmac import new as hmac
+from datetime import datetime
+from time import time, mktime, gmtime
+from werkzeug import url_quote_plus, url_unquote_plus
+from werkzeug._internal import _date_to_unix
+from werkzeug.contrib.sessions import ModificationTrackingDict
+
+
+# rather ugly way to import the correct hash method.  Because
+# hmac either accepts modules with a new method (sha, md5 etc.)
+# or a hashlib factory function we have to figure out what to
+# pass to it.  If we have 2.5 or higher (so not 2.4 with a
+# custom hashlib) we import from hashlib and fail if it does
+# not exist (have seen that in old OS X versions).
+# in all other cases the now deprecated sha module is used.
+_default_hash = None
+if sys.version_info >= (2, 5):
+    try:
+        from hashlib import sha1 as _default_hash
+    except ImportError:
+        pass
+if _default_hash is None:
+    import sha as _default_hash
+
+
+class UnquoteError(Exception):
+    """Internal exception used to signal failures on quoting."""
+
+
+class SecureCookie(ModificationTrackingDict):
+    """Represents a secure cookie.  You can subclass this class and provide
+    an alternative mac method.  The important thing is that the mac method
+    is a function with an interface similar to hashlib.  Required
+    methods are update() and digest().
+
+    Example usage:
+
+    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+    >>> x["foo"]
+    42
+    >>> x["baz"]
+    (1, 2, 3)
+    >>> x["blafasel"] = 23
+    >>> x.should_save
+    True
+
+    :param data: the initial data.  Either a dict, list of tuples or `None`.
+    :param secret_key: the secret key.  If set to `None` or not specified,
+                       it has to be set before :meth:`serialize` is called.
+    :param new: The initial value of the `new` flag.
+    """
+
+    #: The hash method to use.  This has to be a module with a new function
+    #: or a function that creates a hashlib object, such as `hashlib.md5`.
+    #: Subclasses can override this attribute.  The default hash is sha1.
+    hash_method = _default_hash
+
+    #: the module used for serialization.  Unless overridden by subclasses
+    #: the standard pickle module is used.
+    serialization_method = pickle
+
+    #: if the contents should be base64 quoted.  This can be disabled if the
+    #: serialization process returns cookie safe strings only.
+    quote_base64 = True
+
+    def __init__(self, data=None, secret_key=None, new=True):
+        ModificationTrackingDict.__init__(self, data or ())
+        # explicitly convert it into a bytestring because python 2.6
+        # no longer performs an implicit string conversion on hmac
+        if secret_key is not None:
+            secret_key = str(secret_key)
+        self.secret_key = secret_key
+        self.new = new
+
+    def __repr__(self):
+        return '<%s %s%s>' % (
+            self.__class__.__name__,
+            dict.__repr__(self),
+            self.should_save and '*' or ''
+        )
+
+    @property
+    def should_save(self):
+        """True if the session should be saved.  By default this is only true
+        for :attr:`modified` cookies, not :attr:`new`.
+        """
+        return self.modified
+
+    @classmethod
+    def quote(cls, value):
+        """Quote the value for the cookie.  This can be any object supported
+        by :attr:`serialization_method`.
+
+        :param value: the value to quote.
+        """
+        if cls.serialization_method is not None:
+            value = cls.serialization_method.dumps(value)
+        if cls.quote_base64:
+            value = ''.join(value.encode('base64').splitlines()).strip()
+        return value
+
+    @classmethod
+    def unquote(cls, value):
+        """Unquote the value for the cookie.  If unquoting does not work a
+        :exc:`UnquoteError` is raised.
+
+        :param value: the value to unquote.
+        """
+        try:
+            if cls.quote_base64:
+                value = value.decode('base64')
+            if cls.serialization_method is not None:
+                value = cls.serialization_method.loads(value)
+            return value
+        except:
+            # unfortunately pickle and other serialization modules can
+            # cause pretty much every error here.  if we get one we catch it
+            # and convert it into an UnquoteError
+            raise UnquoteError()
+
+    def serialize(self, expires=None):
+        """Serialize the secure cookie into a string.
+
+        If expires is provided, the session will be automatically invalidated
+        after expiration when you unserialize it. This provides better
+        protection against session cookie theft.
+
+        :param expires: an optional expiration date for the cookie (a
+                        :class:`datetime.datetime` object)
+        """
+        if self.secret_key is None:
+            raise RuntimeError('no secret key defined')
+        if expires:
+            self['_expires'] = _date_to_unix(expires)
+        result = []
+        mac = hmac(self.secret_key, None, self.hash_method)
+        for key, value in sorted(self.items()):
+            result.append('%s=%s' % (
+                url_quote_plus(key),
+                self.quote(value)
+            ))
+            mac.update('|' + result[-1])
+        return '%s?%s' % (
+            mac.digest().encode('base64').strip(),
+            '&'.join(result)
+        )
+
+    @classmethod
+    def unserialize(cls, string, secret_key):
+        """Load the secure cookie from a serialized string.
+
+        :param string: the cookie value to unserialize.
+        :param secret_key: the secret key used to serialize the cookie.
+        :return: a new :class:`SecureCookie`.
+        """
+        if isinstance(string, unicode):
+            string = string.encode('utf-8', 'ignore')
+        try:
+            base64_hash, data = string.split('?', 1)
+        except (ValueError, IndexError):
+            items = ()
+        else:
+            items = {}
+            mac = hmac(secret_key, None, cls.hash_method)
+            for item in data.split('&'):
+                mac.update('|' + item)
+                if not '=' in item:
+                    items = None
+                    break
+                key, value = item.split('=', 1)
+                # try to make the key a string
+                key = url_unquote_plus(key)
+                try:
+                    key = str(key)
+                except UnicodeError:
+                    pass
+                items[key] = value
+
+            # if there was no parsing error and the mac looks okay, we can
+            # now securely unpickle our cookie.
+            try:
+                client_hash = base64_hash.decode('base64')
+            except Exception:
+                items = client_hash = None
+            if items is not None and client_hash == mac.digest():
+                try:
+                    for key, value in items.iteritems():
+                        items[key] = cls.unquote(value)
+                except UnquoteError:
+                    items = ()
+                else:
+                    if '_expires' in items:
+                        if time() > items['_expires']:
+                            items = ()
+                        else:
+                            del items['_expires']
+            else:
+                items = ()
+        return cls(items, secret_key, False)
+
+    @classmethod
+    def load_cookie(cls, request, key='session', secret_key=None):
+        """Loads a :class:`SecureCookie` from a cookie in request.  If the
+        cookie is not set, a new :class:`SecureCookie` instance is
+        returned.
+
+        :param request: a request object that has a `cookies` attribute
+                        which is a dict of all cookie values.
+        :param key: the name of the cookie.
+        :param secret_key: the secret key used to unquote the cookie.
+                           Always provide the value even though it has
+                           no default!
+        """
+        data = request.cookies.get(key)
+        if not data:
+            return cls(secret_key=secret_key)
+        return cls.unserialize(data, secret_key)
+
+    def save_cookie(self, response, key='session', expires=None,
+                    session_expires=None, max_age=None, path='/', domain=None,
+                    secure=None, httponly=False, force=False):
+        """Saves the SecureCookie in a cookie on response object.  All
+        parameters that are not described here are forwarded directly
+        to :meth:`~BaseResponse.set_cookie`.
+
+        :param response: a response object that has a
+                         :meth:`~BaseResponse.set_cookie` method.
+        :param key: the name of the cookie.
+        :param session_expires: the expiration date of the secure cookie
+                                stored information.  If this is not provided
+                                the cookie `expires` date is used instead.
+        """
+        if force or self.should_save:
+            data = self.serialize(session_expires or expires)
+            response.set_cookie(key, data, expires=expires, max_age=max_age,
+                                path=path, domain=domain, secure=secure,
+                                httponly=httponly)
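
A small sketch of the serialize/unserialize round trip with an expiration date; the secret key below is a placeholder::

    from datetime import datetime, timedelta
    from werkzeug.contrib.securecookie import SecureCookie

    SECRET_KEY = 'placeholder -- use os.urandom(20) or similar instead'

    cookie = SecureCookie({'user_id': 42}, SECRET_KEY)
    value = cookie.serialize(expires=datetime.utcnow() + timedelta(hours=1))

    # a tampered or expired value silently unserializes to an empty cookie
    restored = SecureCookie.unserialize(value, SECRET_KEY)
    assert restored['user_id'] == 42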
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/sessions.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,342 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.contrib.sessions
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module contains some helper classes that help one to add session
+    support to a python WSGI application.  For full client-side session
+    storage see :mod:`~werkzeug.contrib.securecookie` which implements a
+    secure, client-side session storage.
+
+
+    Application Integration
+    =======================
+
+    ::
+
+        from werkzeug.contrib.sessions import SessionMiddleware, \
+             FilesystemSessionStore
+
+        app = SessionMiddleware(app, FilesystemSessionStore())
+
+    The current session will then appear in the WSGI environment as
+    `werkzeug.session`.  However, it's recommended to use the stores directly
+    in the application rather than the middleware.  For very simple scripts,
+    a session middleware can be sufficient.
+
+    This module does not implement methods or ways to check if a session is
+    expired.  That should be done by a cronjob and is storage specific.  For
+    example, to prune unused filesystem sessions one could check the modified
+    time of the files.  If sessions are stored in a database, the new()
+    method should add an expiration timestamp for the session.
+
+    For better flexibility it's recommended to not use the middleware but the
+    store and session object directly in the application dispatching::
+
+        session_store = FilesystemSessionStore()
+
+        def application(environ, start_response):
+            request = Request(environ)
+            sid = request.cookies.get('cookie_name')
+            if sid is None:
+                request.session = session_store.new()
+            else:
+                request.session = session_store.get(sid)
+            response = get_the_response_object(request)
+            if request.session.should_save:
+                session_store.save(request.session)
+                response.set_cookie('cookie_name', request.session.sid)
+            return response(environ, start_response)
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import os
+import sys
+import tempfile
+from os import path
+from time import time
+from random import random
+try:
+    from hashlib import sha1
+except ImportError:
+    from sha import new as sha1
+from cPickle import dump, load, HIGHEST_PROTOCOL
+
+from werkzeug import ClosingIterator, dump_cookie, parse_cookie, CallbackDict
+from werkzeug.posixemulation import rename
+
+
+_sha1_re = re.compile(r'^[a-f0-9]{40}$')
+
+
+def _urandom():
+    if hasattr(os, 'urandom'):
+        return os.urandom(30)
+    return random()
+
+
+def generate_key(salt=None):
+    return sha1('%s%s%s' % (salt, time(), _urandom())).hexdigest()
+
+
+class ModificationTrackingDict(CallbackDict):
+    __slots__ = ('modified',)
+
+    def __init__(self, *args, **kwargs):
+        def on_update(self):
+            self.modified = True
+        self.modified = False
+        CallbackDict.__init__(self, on_update=on_update)
+        dict.update(self, *args, **kwargs)
+
+    def copy(self):
+        """Create a flat copy of the dict."""
+        missing = object()
+        result = object.__new__(self.__class__)
+        for name in self.__slots__:
+            val = getattr(self, name, missing)
+            if val is not missing:
+                setattr(result, name, val)
+        # also copy the actual dict contents, not only the slot values
+        dict.update(result, self)
+        return result
+
+    def __copy__(self):
+        return self.copy()
+
+
+class Session(ModificationTrackingDict):
+    """Subclass of a dict that keeps track of direct object changes.  Changes
+    in mutable structures are not tracked, for those you have to set
+    `modified` to `True` by hand.
+    """
+    __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
+
+    def __init__(self, data, sid, new=False):
+        ModificationTrackingDict.__init__(self, data)
+        self.sid = sid
+        self.new = new
+
+    def __repr__(self):
+        return '<%s %s%s>' % (
+            self.__class__.__name__,
+            dict.__repr__(self),
+            self.should_save and '*' or ''
+        )
+
+    @property
+    def should_save(self):
+        """True if the session should be saved.
+
+        .. versionchanged:: 0.6
+           By default the session is now only saved if the session is
+           modified, not if it is new like it was before.
+        """
+        return self.modified
+
+
+class SessionStore(object):
+    """Baseclass for all session stores.  The Werkzeug contrib module does not
+    implement any useful stores besides the filesystem store; application
+    developers are encouraged to create their own stores.
+
+    :param session_class: The session class to use.  Defaults to
+                          :class:`Session`.
+    """
+
+    def __init__(self, session_class=None):
+        if session_class is None:
+            session_class = Session
+        self.session_class = session_class
+
+    def is_valid_key(self, key):
+        """Check if a key has the correct format."""
+        return _sha1_re.match(key) is not None
+
+    def generate_key(self, salt=None):
+        """Simple function that generates a new session key."""
+        return generate_key(salt)
+
+    def new(self):
+        """Generate a new session."""
+        return self.session_class({}, self.generate_key(), True)
+
+    def save(self, session):
+        """Save a session."""
+
+    def save_if_modified(self, session):
+        """Save if a session class wants an update."""
+        if session.should_save:
+            self.save(session)
+
+    def delete(self, session):
+        """Delete a session."""
+
+    def get(self, sid):
+        """Get a session for this sid or a new session object.  This method
+        has to check if the session key is valid and create a new session if
+        that wasn't the case.
+        """
+        return self.session_class({}, sid, True)
+
+
+#: used for temporary files by the filesystem session store
+_fs_transaction_suffix = '.__wz_sess'
+
+
+class FilesystemSessionStore(SessionStore):
+    """Simple example session store that saves sessions on the filesystem.
+    This store works best on POSIX systems and Windows Vista / Windows
+    Server 2008 and newer.
+
+    .. versionchanged:: 0.6
+       `renew_missing` was added.  Previously this was considered `True`,
+       now the default changed to `False` and it can be explicitly
+       deactivated.
+
+    :param path: the path to the folder used for storing the sessions.
+                 If not provided the default temporary directory is used.
+    :param filename_template: a string template used to give the session
+                              a filename.  ``%s`` is replaced with the
+                              session id.
+    :param session_class: The session class to use.  Defaults to
+                          :class:`Session`.
+    :param renew_missing: set to `True` if you want the store to
+                          give the user a new sid if the session was
+                          not yet saved.
+    """
+
+    def __init__(self, path=None, filename_template='werkzeug_%s.sess',
+                 session_class=None, renew_missing=False, mode=0644):
+        SessionStore.__init__(self, session_class)
+        if path is None:
+            path = tempfile.gettempdir()
+        self.path = path
+        if isinstance(filename_template, unicode):
+            filename_template = filename_template.encode(
+                sys.getfilesystemencoding() or 'utf-8')
+        assert not filename_template.endswith(_fs_transaction_suffix), \
+            'filename templates may not end with %s' % _fs_transaction_suffix
+        self.filename_template = filename_template
+        self.renew_missing = renew_missing
+        self.mode = mode
+
+    def get_session_filename(self, sid):
+        # out of the box, this should be a strict ASCII subset but
+        # you might reconfigure the session object to have a more
+        # arbitrary string.
+        if isinstance(sid, unicode):
+            sid = sid.encode(sys.getfilesystemencoding() or 'utf-8')
+        return path.join(self.path, self.filename_template % sid)
+
+    def save(self, session):
+        fn = self.get_session_filename(session.sid)
+        fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
+                                   dir=self.path)
+        f = os.fdopen(fd, 'wb')
+        try:
+            dump(dict(session), f, HIGHEST_PROTOCOL)
+        finally:
+            f.close()
+        try:
+            rename(tmp, fn)
+            os.chmod(fn, self.mode)
+        except (IOError, OSError):
+            pass
+
+    def delete(self, session):
+        fn = self.get_session_filename(session.sid)
+        try:
+            os.unlink(fn)
+        except OSError:
+            pass
+
+    def get(self, sid):
+        if not self.is_valid_key(sid):
+            return self.new()
+        try:
+            f = open(self.get_session_filename(sid), 'rb')
+        except IOError:
+            if self.renew_missing:
+                return self.new()
+            data = {}
+        else:
+            try:
+                try:
+                    data = load(f)
+                except Exception:
+                    data = {}
+            finally:
+                f.close()
+        return self.session_class(data, sid, False)
+
+    def list(self):
+        """Lists all sessions in the store.
+
+        .. versionadded:: 0.6
+        """
+        before, after = self.filename_template.split('%s', 1)
+        filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
+                                                    re.escape(after)))
+        result = []
+        for filename in os.listdir(self.path):
+            #: this is a session that is still being saved.
+            if filename.endswith(_fs_transaction_suffix):
+                continue
+            match = filename_re.match(filename)
+            if match is not None:
+                result.append(match.group(1))
+        return result
+
+
+class SessionMiddleware(object):
+    """A simple middleware that puts the session object of a store provided
+    into the WSGI environ.  It automatically sets cookies and restores
+    sessions.
+
+    However, a middleware is not the preferred solution because it won't be as
+    fast as sessions managed by the application itself, and it puts a key into
+    the WSGI environment that is only relevant for the application, which goes
+    against the concept of WSGI.
+
+    The cookie parameters are the same as for the :func:`~werkzeug.dump_cookie`
+    function just prefixed with ``cookie_``.  Additionally `max_age` is
+    called `cookie_age` and not `cookie_max_age` because of backwards
+    compatibility.
+    """
+
+    def __init__(self, app, store, cookie_name='session_id',
+                 cookie_age=None, cookie_expires=None, cookie_path='/',
+                 cookie_domain=None, cookie_secure=None,
+                 cookie_httponly=False, environ_key='werkzeug.session'):
+        self.app = app
+        self.store = store
+        self.cookie_name = cookie_name
+        self.cookie_age = cookie_age
+        self.cookie_expires = cookie_expires
+        self.cookie_path = cookie_path
+        self.cookie_domain = cookie_domain
+        self.cookie_secure = cookie_secure
+        self.cookie_httponly = cookie_httponly
+        self.environ_key = environ_key
+
+    def __call__(self, environ, start_response):
+        cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
+        sid = cookie.get(self.cookie_name, None)
+        if sid is None:
+            session = self.store.new()
+        else:
+            session = self.store.get(sid)
+        environ[self.environ_key] = session
+
+        def injecting_start_response(status, headers, exc_info=None):
+            if session.should_save:
+                self.store.save(session)
+                headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
+                                session.sid, self.cookie_age,
+                                self.cookie_expires, self.cookie_path,
+                                self.cookie_domain, self.cookie_secure,
+                                self.cookie_httponly)))
+            return start_response(status, headers, exc_info)
+        return ClosingIterator(self.app(environ, injecting_start_response),
+                               lambda: self.store.save_if_modified(session))
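
A sketch of the middleware variant with a filesystem store; the cookie name and view below are illustrative::

    from werkzeug import Response
    from werkzeug.contrib.sessions import FilesystemSessionStore, \
         SessionMiddleware

    session_store = FilesystemSessionStore()

    def application(environ, start_response):
        session = environ['werkzeug.session']
        # mutating the session sets `modified`, so the middleware saves it
        # and sets the session cookie
        session['visits'] = session.get('visits', 0) + 1
        response = Response('visit #%d' % session['visits'])
        return response(environ, start_response)

    application = SessionMiddleware(application, session_store,
                                    cookie_name='myapp_sid',
                                    cookie_httponly=True)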
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/testtools.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.testtools
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module implements extended wrappers for simplified testing.
+
+    `TestResponse`
+        A response wrapper which adds various cached attributes for
+        simplified assertions on various content types.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from werkzeug import Response, cached_property, import_string
+
+
+class ContentAccessors(object):
+    """
+    A mixin class for response objects that provides a couple of useful
+    accessors for unittesting.
+    """
+
+    def xml(self):
+        """Get an etree if possible."""
+        if 'xml' not in self.mimetype:
+            raise AttributeError(
+                'Not an XML response (Content-Type: %s)'
+                % self.mimetype)
+        for module in ['xml.etree.ElementTree', 'ElementTree',
+                       'elementtree.ElementTree']:
+            etree = import_string(module, silent=True)
+            if etree is not None:
+                return etree.XML(self.data)
+        raise RuntimeError('You must have ElementTree installed '
+                           'to use TestResponse.xml')
+    xml = cached_property(xml)
+
+    def lxml(self):
+        """Get an lxml etree if possible."""
+        if ('html' not in self.mimetype and 'xml' not in self.mimetype):
+            raise AttributeError('Not an HTML/XML response')
+        from lxml import etree
+        try:
+            from lxml.html import fromstring
+        except ImportError:
+            fromstring = etree.HTML
+        if self.mimetype == 'text/html':
+            return fromstring(self.data)
+        return etree.XML(self.data)
+    lxml = cached_property(lxml)
+
+    def json(self):
+        """Get the result of simplejson.loads if possible."""
+        if 'json' not in self.mimetype:
+            raise AttributeError('Not a JSON response')
+        try:
+            from simplejson import loads
+        except ImportError:
+            from json import loads
+        return loads(self.data)
+    json = cached_property(json)
+
+
+class TestResponse(Response, ContentAccessors):
+    """Pass this to `werkzeug.test.Client` for easier unittesting."""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/contrib/wrappers.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.wrappers
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Extra wrappers or mixins contributed by the community.  These wrappers can
+    be mixed into request objects to add extra functionality.
+
+    Example::
+
+        from werkzeug import Request as RequestBase
+        from werkzeug.contrib.wrappers import JSONRequestMixin
+
+        class Request(RequestBase, JSONRequestMixin):
+            pass
+
+    Afterwards this request object provides the extra functionality of the
+    :class:`JSONRequestMixin`.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import codecs
+from werkzeug.exceptions import BadRequest
+from werkzeug.utils import cached_property
+from werkzeug.http import dump_options_header, parse_options_header
+from werkzeug._internal import _decode_unicode
+try:
+    from simplejson import loads
+except ImportError:
+    from json import loads
+
+
+def is_known_charset(charset):
+    """Checks if the given charset is known to Python."""
+    try:
+        codecs.lookup(charset)
+    except LookupError:
+        return False
+    return True
+
+
+class JSONRequestMixin(object):
+    """Add json method to a request object.  This will parse the input data
+    through simplejson if possible.
+
+    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
+    is not json or if the data itself cannot be parsed as json.
+    """
+
+    @cached_property
+    def json(self):
+        """Get the result of simplejson.loads if possible."""
+        if 'json' not in self.environ.get('CONTENT_TYPE', ''):
+            raise BadRequest('Not a JSON request')
+        try:
+            return loads(self.data)
+        except Exception:
+            raise BadRequest('Unable to read JSON request')
+
+
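+# Usage sketch (illustrative, not part of this module): with the ``Request``
+# subclass from the module docstring above and ``werkzeug.test.EnvironBuilder``,
+# the property parses the request body, e.g.
+#
+#     from werkzeug.test import EnvironBuilder
+#     builder = EnvironBuilder(method='POST', data='{"answer": 42}',
+#                              content_type='application/json')
+#     Request(builder.get_environ()).json   # -> {u'answer': 42}
+
+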
+class ProtobufRequestMixin(object):
+    """Add protobuf parsing method to a request object.  This will parse the
+    input data through `protobuf`_ if possible.
+
+    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
+    is not protobuf or if the data itself cannot be parsed properly.
+
+    .. _protobuf: http://code.google.com/p/protobuf/
+    """
+
+    #: by default the :class:`ProtobufRequestMixin` will raise a
+    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
+    #: initialized.  You can bypass that check by setting this
+    #: attribute to `False`.
+    protobuf_check_initialization = True
+
+    def parse_protobuf(self, proto_type):
+        """Parse the data into an instance of proto_type."""
+        if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
+            raise BadRequest('Not a Protobuf request')
+
+        obj = proto_type()
+        try:
+            obj.ParseFromString(self.data)
+        except Exception:
+            raise BadRequest("Unable to parse Protobuf request")
+
+        # Fail if not all required fields are set
+        if self.protobuf_check_initialization and not obj.IsInitialized():
+            raise BadRequest("Partial Protobuf request")
+
+        return obj
+
+
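+# Usage sketch (illustrative; ``MyMessage`` is a hypothetical generated
+# protobuf message class, not provided by this module):
+#
+#     from werkzeug import Request as RequestBase
+#
+#     class Request(RequestBase, ProtobufRequestMixin):
+#         pass
+#
+#     # inside a view, with ``request`` bound to such a Request:
+#     #     msg = request.parse_protobuf(MyMessage)
+#     # ``msg`` is a fully parsed MyMessage, or BadRequest was raised.
+
+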
+class RoutingArgsRequestMixin(object):
+    """This request mixin adds support for the wsgiorg routing args
+    `specification`_.
+
+    .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
+    """
+
+    def _get_routing_args(self):
+        return self.environ.get('wsgiorg.routing_args', ((), {}))[0]
+
+    def _set_routing_args(self, value):
+        if self.shallow:
+            raise RuntimeError('A shallow request tried to modify the WSGI '
+                               'environment.  If you really want to do that, '
+                               'set `shallow` to False.')
+        self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
+
+    routing_args = property(_get_routing_args, _set_routing_args, doc='''
+        The positional URL arguments as `tuple`.''')
+    del _get_routing_args, _set_routing_args
+
+    def _get_routing_vars(self):
+        rv = self.environ.get('wsgiorg.routing_args')
+        if rv is not None:
+            return rv[1]
+        rv = {}
+        if not self.shallow:
+            self.routing_vars = rv
+        return rv
+
+    def _set_routing_vars(self, value):
+        if self.shallow:
+            raise RuntimeError('A shallow request tried to modify the WSGI '
+                               'environment.  If you really want to do that, '
+                               'set `shallow` to False.')
+        self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
+
+    routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
+        The keyword URL arguments as `dict`.''')
+    del _get_routing_vars, _set_routing_vars
+
+
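+# Behaviour sketch (illustrative; ``request`` is assumed to be bound to a
+# WSGI environ and to use this mixin): a router stores its match in the
+# environ and the two properties expose it, e.g.
+#
+#     request.environ['wsgiorg.routing_args'] = (('2010',), {'page': '2'})
+#     request.routing_args   # -> ('2010',)
+#     request.routing_vars   # -> {'page': '2'}
+
+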
+class ReverseSlashBehaviorRequestMixin(object):
+    """This mixin reverses the trailing slash behavior of :attr:`script_root`
+    and :attr:`path`.  This makes it possible to use :func:`~urlparse.urljoin`
+    directly on the paths.
+
+    Because it changes the behavior of :class:`Request` this class has to be
+    mixed in *before* the actual request class::
+
+        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
+            pass
+
+    This example shows the differences (for an application mounted on
+    `/application` and the request going to `/application/foo/bar`):
+
+        +---------------+-------------------+---------------------+
+        |               | normal behavior   | reverse behavior    |
+        +===============+===================+=====================+
+        | `script_root` | ``/application``  | ``/application/``   |
+        +---------------+-------------------+---------------------+
+        | `path`        | ``/foo/bar``      | ``foo/bar``         |
+        +---------------+-------------------+---------------------+
+    """
+
+    @cached_property
+    def path(self):
+        """Requested path as unicode.  This works a bit like the regular path
+        info in the WSGI environment but will not include a leading slash.
+        """
+        path = (self.environ.get('PATH_INFO') or '').lstrip('/')
+        return _decode_unicode(path, self.charset, self.encoding_errors)
+
+    @cached_property
+    def script_root(self):
+        """The root path of the script includling a trailing slash."""
+        path = (self.environ.get('SCRIPT_NAME') or '').rstrip('/') + '/'
+        return _decode_unicode(path, self.charset, self.encoding_errors)
+
+
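+# Sketch of why the reversed slash behavior helps (values match the table
+# above; ``request`` is assumed to use this mixin):
+#
+#     from urlparse import urljoin
+#     urljoin(request.script_root, request.path)
+#     # '/application/' joined with 'foo/bar' -> '/application/foo/bar'
+
+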
+class DynamicCharsetRequestMixin(object):
+    """"If this mixin is mixed into a request class it will provide
+    a dynamic `charset` attribute.  This means that if the charset is
+    transmitted in the content type headers it's used from there.
+
+    Because it changes the behavior of :class:`Request` this class has
+    to be mixed in *before* the actual request class::
+
+        class MyRequest(DynamicCharsetRequestMixin, Request):
+            pass
+
+    By default the request object assumes that the URL charset is the
+    same as the data charset.  If the charset varies on each request
+    based on the transmitted data it's not a good idea to let the URLs
+    change based on that.  Most browsers assume either utf-8 or latin1
+    for the URLs if they have troubles figuring out.  It's strongly
+    recommended to set the URL charset to utf-8::
+
+        class MyRequest(DynamicCharsetRequestMixin, Request):
+            url_charset = 'utf-8'
+
+    .. versionadded:: 0.6
+    """
+
+    #: the default charset that is assumed if the content type header
+    #: is missing or does not contain a charset parameter.  The default
+    #: is latin1 which is what HTTP specifies as default charset.
+    #: You may however want to set this to utf-8 to better support
+    #: browsers that do not transmit a charset for incoming data.
+    default_charset = 'latin1'
+
+    def unknown_charset(self, charset):
+        """Called if a charset was provided but is not supported by
+        the Python codecs module.  By default latin1 is assumed then
+        to not lose any information, you may override this method to
+        change the behavior.
+
+        :param charset: the charset that was not found.
+        :return: the replacement charset.
+        """
+        return 'latin1'
+
+    @cached_property
+    def charset(self):
+        """The charset from the content type."""
+        header = self.environ.get('CONTENT_TYPE')
+        if header:
+            ct, options = parse_options_header(header)
+            charset = options.get('charset')
+            if charset:
+                if is_known_charset(charset):
+                    return charset
+                return self.unknown_charset(charset)
+        return self.default_charset
+
+
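+# Sketch (``MyRequest`` refers to the class from the docstring example above):
+# the charset is taken from the Content-Type header when present, e.g.
+#
+#     from werkzeug.test import EnvironBuilder
+#     builder = EnvironBuilder(data='abc',
+#                              content_type='text/plain; charset=iso-8859-15')
+#     MyRequest(builder.get_environ()).charset   # -> 'iso-8859-15'
+#     MyRequest({}).charset                      # -> 'latin1' (default_charset)
+
+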
+class DynamicCharsetResponseMixin(object):
+    """If this mixin is mixed into a response class it will provide
+    a dynamic `charset` attribute.  This means that the charset is
+    looked up in the `Content-Type` header, stored there, and updates
+    itself automatically.  This also means a small performance hit but
+    can be useful if you're working with different charsets on
+    responses.
+
+    Because the charset attribute is not a property at class level, the
+    default value is stored in `default_charset`.
+
+    Because it changes the behavior of :class:`Response` this class has
+    to be mixed in *before* the actual response class::
+
+        class MyResponse(DynamicCharsetResponseMixin, Response):
+            pass
+
+    .. versionadded:: 0.6
+    """
+
+    #: the default charset.
+    default_charset = 'utf-8'
+
+    def _get_charset(self):
+        header = self.headers.get('content-type')
+        if header:
+            charset = parse_options_header(header)[1].get('charset')
+            if charset:
+                return charset
+        return self.default_charset
+
+    def _set_charset(self, charset):
+        header = self.headers.get('content-type')
+        ct, options = parse_options_header(header)
+        if not ct:
+            raise TypeError('Cannot set charset if Content-Type '
+                            'header is missing.')
+        options['charset'] = charset
+        self.headers['Content-Type'] = dump_options_header(ct, options)
+
+    charset = property(_get_charset, _set_charset, doc="""
+        The charset for the response.  It's stored inside the
+        Content-Type header as a parameter.""")
+    del _get_charset, _set_charset
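+
+
+# Sketch (``MyResponse`` refers to the class from the docstring example above):
+# the charset is read from and written back into the Content-Type header, e.g.
+#
+#     resp = MyResponse('hello', content_type='text/html; charset=utf-8')
+#     resp.charset                     # -> 'utf-8'
+#     resp.charset = 'latin1'
+#     resp.headers['Content-Type']     # -> 'text/html; charset=latin1'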
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/datastructures.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,2331 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.datastructures
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides mixins and classes with an immutable interface.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import codecs
+import mimetypes
+
+from werkzeug._internal import _proxy_repr, _missing, _empty_stream
+
+
+_locale_delim_re = re.compile(r'[_-]')
+
+
+def is_immutable(self):
+    raise TypeError('%r objects are immutable' % self.__class__.__name__)
+
+
+def iter_multi_items(mapping):
+    """Iterates over the items of a mapping yielding keys and values
+    without dropping any from more complex structures.
+    """
+    if isinstance(mapping, MultiDict):
+        for item in mapping.iteritems(multi=True):
+            yield item
+    elif isinstance(mapping, dict):
+        for key, value in mapping.iteritems():
+            if isinstance(value, (tuple, list)):
+                for value in value:
+                    yield key, value
+            else:
+                yield key, value
+    else:
+        for item in mapping:
+            yield item
+
+
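+# Flattening sketch (illustrative):
+#
+#     list(iter_multi_items({'a': [1, 2]}))          # -> [('a', 1), ('a', 2)]
+#     list(iter_multi_items(MultiDict([('a', 1), ('a', 2)])))
+#     # -> [('a', 1), ('a', 2)]
+
+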
+class ImmutableListMixin(object):
+    """Makes a :class:`list` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (list(self),)
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def __delslice__(self, i, j):
+        is_immutable(self)
+
+    def __iadd__(self, other):
+        is_immutable(self)
+    __imul__ = __iadd__
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+
+    def __setslice__(self, i, j, value):
+        is_immutable(self)
+
+    def append(self, item):
+        is_immutable(self)
+    remove = append
+
+    def extend(self, iterable):
+        is_immutable(self)
+
+    def insert(self, pos, value):
+        is_immutable(self)
+
+    def pop(self, index=-1):
+        is_immutable(self)
+
+    def reverse(self):
+        is_immutable(self)
+
+    def sort(self, cmp=None, key=None, reverse=None):
+        is_immutable(self)
+
+
+class ImmutableList(ImmutableListMixin, list):
+    """An immutable :class:`list`.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    __repr__ = _proxy_repr(list)
+
+
+class ImmutableDictMixin(object):
+    """Makes a :class:`dict` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (dict(self),)
+
+    def setdefault(self, key, default=None):
+        is_immutable(self)
+
+    def update(self, *args, **kwargs):
+        is_immutable(self)
+
+    def pop(self, key, default=None):
+        is_immutable(self)
+
+    def popitem(self):
+        is_immutable(self)
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def clear(self):
+        is_immutable(self)
+
+
+class ImmutableMultiDictMixin(ImmutableDictMixin):
+    """Makes a :class:`MultiDict` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (self.items(multi=True),)
+
+    def add(self, key, value):
+        is_immutable(self)
+
+    def popitemlist(self):
+        is_immutable(self)
+
+    def poplist(self, key):
+        is_immutable(self)
+
+    def setlist(self, key, new_list):
+        is_immutable(self)
+
+    def setlistdefault(self, key, default_list=None):
+        is_immutable(self)
+
+
+class UpdateDictMixin(object):
+    """Makes dicts call `self.on_update` on modifications.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    on_update = None
+
+    def calls_update(name):
+        def oncall(self, *args, **kw):
+            rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
+            if self.on_update is not None:
+                self.on_update(self)
+            return rv
+        oncall.__name__ = name
+        return oncall
+
+    __setitem__ = calls_update('__setitem__')
+    __delitem__ = calls_update('__delitem__')
+    clear = calls_update('clear')
+    pop = calls_update('pop')
+    popitem = calls_update('popitem')
+    setdefault = calls_update('setdefault')
+    update = calls_update('update')
+    del calls_update
+
+
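+# Usage sketch (illustrative): any dict subclass that mixes this in calls
+# ``on_update`` with itself after every modification, e.g.
+#
+#     class TrackedDict(UpdateDictMixin, dict):
+#         pass
+#
+#     calls = []
+#     d = TrackedDict()
+#     d.on_update = calls.append
+#     d['x'] = 1
+#     calls   # -> [d]  (one notification for the __setitem__ call)
+
+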
+class TypeConversionDict(dict):
+    """Works like a regular dict but the :meth:`get` method can perform
+    type conversions.  :class:`MultiDict` and :class:`CombinedMultiDict`
+    are subclasses of this class and provide the same feature.
+
+    .. versionadded:: 0.5
+    """
+
+    def get(self, key, default=None, type=None):
+        """Return the default value if the requested data doesn't exist.
+        If `type` is provided and is a callable it should convert the value,
+        return it or raise a :exc:`ValueError` if that is not possible.  In
+        this case the function will return the default as if the value was not
+        found:
+
+        >>> d = TypeConversionDict(foo='42', bar='blub')
+        >>> d.get('foo', type=int)
+        42
+        >>> d.get('bar', -1, type=int)
+        -1
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key can't
+                        be looked up.  If not further specified `None` is
+                        returned.
+        :param type: A callable that is used to cast the value in the
+                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
+                     by this callable the default value is returned.
+        """
+        try:
+            rv = self[key]
+            if type is not None:
+                rv = type(rv)
+        except (KeyError, ValueError):
+            rv = default
+        return rv
+
+
+class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
+    """Works like a :class:`TypeConversionDict` but does not support
+    modifications.
+
+    .. versionadded:: 0.5
+    """
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other Python immutable type (e.g. :class:`tuple`).
+        """
+        return TypeConversionDict(self)
+
+    def __copy__(self):
+        return self
+
+
+class MultiDict(TypeConversionDict):
+    """A :class:`MultiDict` is a dictionary subclass customized to deal with
+    multiple values for the same key which is for example used by the parsing
+    functions in the wrappers.  This is necessary because some HTML form
+    elements pass multiple values for the same key.
+
+    :class:`MultiDict` implements all standard dictionary methods.
+    Internally, it saves all values for a key as a list, but the standard dict
+    access methods will only return the first value for a key. If you want to
+    gain access to the other values, too, you have to use the `list` methods as
+    explained below.
+
+    Basic Usage:
+
+    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
+    >>> d
+    MultiDict([('a', 'b'), ('a', 'c')])
+    >>> d['a']
+    'b'
+    >>> d.getlist('a')
+    ['b', 'c']
+    >>> 'a' in d
+    True
+
+    It behaves like a normal dict thus all dict functions will only return the
+    first value when multiple values for one key are found.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
+    exceptions.
+
+    A :class:`MultiDict` can be constructed from an iterable of
+    ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
+    onwards some keyword parameters.
+
+    :param mapping: the initial value for the :class:`MultiDict`.  Either a
+                    regular dict, an iterable of ``(key, value)`` tuples
+                    or `None`.
+    """
+
+    # the key error this class raises.  Because of circular dependencies
+    # with the http exception module this class is created at the end of
+    # this module.
+    KeyError = None
+
+    def __init__(self, mapping=None):
+        if isinstance(mapping, MultiDict):
+            dict.__init__(self, ((k, l[:]) for k, l in mapping.iterlists()))
+        elif isinstance(mapping, dict):
+            tmp = {}
+            for key, value in mapping.iteritems():
+                if isinstance(value, (tuple, list)):
+                    value = list(value)
+                else:
+                    value = [value]
+                tmp[key] = value
+            dict.__init__(self, tmp)
+        else:
+            tmp = {}
+            for key, value in mapping or ():
+                tmp.setdefault(key, []).append(value)
+            dict.__init__(self, tmp)
+
+    def __getstate__(self):
+        return dict(self.lists())
+
+    def __setstate__(self, value):
+        dict.clear(self)
+        dict.update(self, value)
+
+    def __iter__(self):
+        return self.iterkeys()
+
+    def __getitem__(self, key):
+        """Return the first data value for this key;
+        raises KeyError if not found.
+
+        :param key: The key to be looked up.
+        :raise KeyError: if the key does not exist.
+        """
+        if key in self:
+            return dict.__getitem__(self, key)[0]
+        raise self.KeyError(key)
+
+    def __setitem__(self, key, value):
+        """Like :meth:`add` but removes an existing key first.
+
+        :param key: the key for the value.
+        :param value: the value to set.
+        """
+        dict.__setitem__(self, key, [value])
+
+    def add(self, key, value):
+        """Adds a new value for the key.
+
+        .. versionadded:: 0.6
+
+        :param key: the key for the value.
+        :param value: the value to add.
+        """
+        dict.setdefault(self, key, []).append(value)
+
+    def getlist(self, key, type=None):
+        """Return the list of items for a given key. If that key is not in the
+        `MultiDict`, the return value will be an empty list.  Just as `get`
+        `getlist` accepts a `type` parameter.  All items will be converted
+        with the callable defined there.
+
+        :param key: The key to be looked up.
+        :param type: A callable that is used to cast the value in the
+                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
+                     by this callable the value will be removed from the list.
+        :return: a :class:`list` of all the values for the key.
+        """
+        try:
+            rv = dict.__getitem__(self, key)
+        except KeyError:
+            return []
+        if type is None:
+            return list(rv)
+        result = []
+        for item in rv:
+            try:
+                result.append(type(item))
+            except ValueError:
+                pass
+        return result
+
+    def setlist(self, key, new_list):
+        """Remove the old values for a key and add new ones.  Note that the list
+        you pass in will be shallow-copied before it is inserted into
+        the dictionary.
+
+        >>> d = MultiDict()
+        >>> d.setlist('foo', ['1', '2'])
+        >>> d['foo']
+        '1'
+        >>> d.getlist('foo')
+        ['1', '2']
+
+        :param key: The key for which the values are set.
+        :param new_list: An iterable with the new values for the key.  Old values
+                         are removed first.
+        """
+        dict.__setitem__(self, key, list(new_list))
+
+    def setdefault(self, key, default=None):
+        """Returns the value for the key if it is in the dict, otherwise it
+        returns `default` and sets that value for `key`.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key is not
+                        in the dict.  If not further specified it's `None`.
+        """
+        if key not in self:
+            self[key] = default
+        else:
+            default = self[key]
+        return default
+
+    def setlistdefault(self, key, default_list=None):
+        """Like `setdefault` but sets multiple values.  The list returned
+        is not a copy, but the list that is actually used internally.  This
+        means that you can put new values into the dict by appending items
+        to the list:
+
+        >>> d = MultiDict({"foo": 1})
+        >>> d.setlistdefault("foo").extend([2, 3])
+        >>> d.getlist("foo")
+        [1, 2, 3]
+
+        :param key: The key to be looked up.
+        :param default_list: An iterable of default values.  It is either copied
+                        (in case it was a list) or converted into a list
+                        before returned.
+        :return: a :class:`list`
+        """
+        if key not in self:
+            default_list = list(default_list or ())
+            dict.__setitem__(self, key, default_list)
+        else:
+            default_list = dict.__getitem__(self, key)
+        return default_list
+
+    def items(self, multi=False):
+        """Return a list of ``(key, value)`` pairs.
+
+        :param multi: If set to `True` the list returned will have a
+                      pair for each value of each key.  Otherwise it
+                      will only contain pairs for the first value of
+                      each key.
+
+        :return: a :class:`list`
+        """
+        return list(self.iteritems(multi))
+
+    def lists(self):
+        """Return a list of ``(key, value)`` pairs, where values is the list of
+        all values associated with the key.
+
+        :return: a :class:`list`
+        """
+        return list(self.iterlists())
+
+    def values(self):
+        """Returns a list of the first value on every key's value list.
+
+        :return: a :class:`list`.
+        """
+        return [self[key] for key in self.iterkeys()]
+
+    def listvalues(self):
+        """Return a list of all values associated with a key.  Zipping
+        :meth:`keys` and this is the same as calling :meth:`lists`:
+
+        >>> d = MultiDict({"foo": [1, 2, 3]})
+        >>> zip(d.keys(), d.listvalues()) == d.lists()
+        True
+
+        :return: a :class:`list`
+        """
+        return list(self.iterlistvalues())
+
+    def iteritems(self, multi=False):
+        """Like :meth:`items` but returns an iterator."""
+        for key, values in dict.iteritems(self):
+            if multi:
+                for value in values:
+                    yield key, value
+            else:
+                yield key, values[0]
+
+    def iterlists(self):
+        """Return a list of all values associated with a key.
+
+        :return: a class:`list`
+        """
+        for key, values in dict.iteritems(self):
+            yield key, list(values)
+
+    def itervalues(self):
+        """Like :meth:`values` but returns an iterator."""
+        for values in dict.itervalues(self):
+            yield values[0]
+
+    def iterlistvalues(self):
+        """like :meth:`listvalues` but returns an iterator."""
+        for values in dict.itervalues(self):
+            yield list(values)
+
+    def copy(self):
+        """Return a shallow copy of this object."""
+        return self.__class__(self)
+
+    def to_dict(self, flat=True):
+        """Return the contents as regular dict.  If `flat` is `True` the
+        returned dict will only have the first item present, if `flat` is
+        `False` all values will be returned as lists.
+
+        :param flat: If set to `False` the dict returned will have lists
+                     with all the values in it.  Otherwise it will only
+                     contain the first value for each key.
+        :return: a :class:`dict`
+        """
+        if flat:
+            return dict(self.iteritems())
+        return dict(self.lists())
+
+    def update(self, other_dict):
+        """update() extends rather than replaces existing key lists."""
+        for key, value in iter_multi_items(other_dict):
+            MultiDict.add(self, key, value)
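+    # Illustrative behaviour of update() (not executed): values are added,
+    # existing lists are not replaced, e.g.
+    #     d = MultiDict([('a', 'b')])
+    #     d.update({'a': 'c'})
+    #     d.getlist('a')   # -> ['b', 'c']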
+
+    def pop(self, key, default=_missing):
+        """Pop the first item for a list on the dict.  Afterwards the
+        key is removed from the dict, so additional values are discarded:
+
+        >>> d = MultiDict({"foo": [1, 2, 3]})
+        >>> d.pop("foo")
+        1
+        >>> "foo" in d
+        False
+
+        :param key: the key to pop.
+        :param default: if provided the value to return if the key was
+                        not in the dictionary.
+        """
+        try:
+            return dict.pop(self, key)[0]
+        except KeyError, e:
+            if default is not _missing:
+                return default
+            raise self.KeyError(str(e))
+
+    def popitem(self):
+        """Pop an item from the dict."""
+        try:
+            item = dict.popitem(self)
+            return (item[0], item[1][0])
+        except KeyError, e:
+            raise self.KeyError(str(e))
+
+    def poplist(self, key):
+        """Pop the list for a key from the dict.  If the key is not in the dict
+        an empty list is returned.
+
+        .. versionchanged:: 0.5
+           If the key no longer exists an empty list is returned instead
+           of raising an error.
+        """
+        return dict.pop(self, key, [])
+
+    def popitemlist(self):
+        """Pop a ``(key, list)`` tuple from the dict."""
+        try:
+            return dict.popitem(self)
+        except KeyError, e:
+            raise self.KeyError(str(e))
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.items(multi=True))
+
+
+class _omd_bucket(object):
+    """Wraps values in the :class:`OrderedMultiDict`.  This makes it
+    possible to keep an order over multiple different keys.  It requires
+    a lot of extra memory and slows down access a lot, but makes it
+    possible to access elements in O(1) and iterate in O(n).
+    """
+    __slots__ = ('prev', 'key', 'value', 'next')
+
+    def __init__(self, omd, key, value):
+        self.prev = omd._last_bucket
+        self.key = key
+        self.value = value
+        self.next = None
+
+        if omd._first_bucket is None:
+            omd._first_bucket = self
+        if omd._last_bucket is not None:
+            omd._last_bucket.next = self
+        omd._last_bucket = self
+
+    def unlink(self, omd):
+        if self.prev:
+            self.prev.next = self.next
+        if self.next:
+            self.next.prev = self.prev
+        if omd._first_bucket is self:
+            omd._first_bucket = self.next
+        if omd._last_bucket is self:
+            omd._last_bucket = self.prev
+
+
+class OrderedMultiDict(MultiDict):
+    """Works like a regular :class:`MultiDict` but preserves the
+    order of the fields.  To convert the ordered multi dict into a
+    list you can use the :meth:`items` method and pass it ``multi=True``.
+
+    In general an :class:`OrderedMultiDict` is an order of magnitude
+    slower than a :class:`MultiDict`.
+
+    .. admonition:: note
+
+       Due to a limitation in Python you cannot convert an ordered
+       multi dict into a regular dict by using ``dict(multidict)``.
+       Instead you have to use the :meth:`to_dict` method, otherwise
+       the internal bucket objects are exposed.
+    """
+
+    # the key error this class raises.  Because of circular dependencies
+    # with the http exception module this class is created at the end of
+    # this module.
+    KeyError = None
+
+    def __init__(self, mapping=None):
+        dict.__init__(self)
+        self._first_bucket = self._last_bucket = None
+        if mapping is not None:
+            OrderedMultiDict.update(self, mapping)
+
+    def __eq__(self, other):
+        if not isinstance(other, MultiDict):
+            return NotImplemented
+        if isinstance(other, OrderedMultiDict):
+            iter1 = self.iteritems(multi=True)
+            iter2 = other.iteritems(multi=True)
+            try:
+                for k1, v1 in iter1:
+                    k2, v2 = iter2.next()
+                    if k1 != k2 or v1 != v2:
+                        return False
+            except StopIteration:
+                return False
+            try:
+                iter2.next()
+            except StopIteration:
+                return True
+            return False
+        if len(self) != len(other):
+            return False
+        for key, values in self.iterlists():
+            if other.getlist(key) != values:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (self.items(multi=True),)
+
+    def __getstate__(self):
+        return self.items(multi=True)
+
+    def __setstate__(self, values):
+        dict.clear(self)
+        for key, value in values:
+            self.add(key, value)
+
+    def __getitem__(self, key):
+        if key in self:
+            return dict.__getitem__(self, key)[0].value
+        raise self.KeyError(key)
+
+    def __setitem__(self, key, value):
+        self.poplist(key)
+        self.add(key, value)
+
+    def __delitem__(self, key):
+        self.pop(key)
+
+    def iterkeys(self):
+        return (key for key, value in self.iteritems())
+
+    def itervalues(self):
+        return (value for key, value in self.iteritems())
+
+    def iteritems(self, multi=False):
+        ptr = self._first_bucket
+        if multi:
+            while ptr is not None:
+                yield ptr.key, ptr.value
+                ptr = ptr.next
+        else:
+            returned_keys = set()
+            while ptr is not None:
+                if ptr.key not in returned_keys:
+                    returned_keys.add(ptr.key)
+                    yield ptr.key, ptr.value
+                ptr = ptr.next
+
+    def iterlists(self):
+        returned_keys = set()
+        ptr = self._first_bucket
+        while ptr is not None:
+            if ptr.key not in returned_keys:
+                yield ptr.key, self.getlist(ptr.key)
+                returned_keys.add(ptr.key)
+            ptr = ptr.next
+
+    def iterlistvalues(self):
+        for key, values in self.iterlists():
+            yield values
+
+    def add(self, key, value):
+        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
+
+    def getlist(self, key, type=None):
+        try:
+            rv = dict.__getitem__(self, key)
+        except KeyError:
+            return []
+        if type is None:
+            return [x.value for x in rv]
+        result = []
+        for item in rv:
+            try:
+                result.append(type(item.value))
+            except ValueError:
+                pass
+        return result
+
+    def setlist(self, key, new_list):
+        self.poplist(key)
+        for value in new_list:
+            self.add(key, value)
+
+    def setlistdefault(self, key, default_list=None):
+        raise TypeError('setlistdefault is unsupported for '
+                        'ordered multi dicts')
+
+    def update(self, mapping):
+        for key, value in iter_multi_items(mapping):
+            OrderedMultiDict.add(self, key, value)
+
+    def poplist(self, key):
+        buckets = dict.pop(self, key, ())
+        for bucket in buckets:
+            bucket.unlink(self)
+        return [x.value for x in buckets]
+
+    def pop(self, key, default=_missing):
+        try:
+            buckets = dict.pop(self, key)
+        except KeyError, e:
+            if default is not _missing:
+                return default
+            raise self.KeyError(str(e))
+        for bucket in buckets:
+            bucket.unlink(self)
+        return buckets[0].value
+
+    def popitem(self):
+        try:
+            key, buckets = dict.popitem(self)
+        except KeyError, e:
+            raise self.KeyError(str(e))
+        for bucket in buckets:
+            bucket.unlink(self)
+        return key, buckets[0].value
+
+    def popitemlist(self):
+        try:
+            key, buckets = dict.popitem(self)
+        except KeyError, e:
+            raise self.KeyError(str(e))
+        for bucket in buckets:
+            bucket.unlink(self)
+        return key, [x.value for x in buckets]
+
+
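+# Ordering sketch (illustrative): values for duplicate keys keep their
+# overall insertion order when iterated with ``multi=True``, e.g.
+#
+#     d = OrderedMultiDict()
+#     d.add('a', 1); d.add('b', 2); d.add('a', 3)
+#     d.items(multi=True)   # -> [('a', 1), ('b', 2), ('a', 3)]
+#     d.getlist('a')        # -> [1, 3]
+
+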
+def _options_header_vkw(value, kw):
+    if not kw:
+        return value
+    return dump_options_header(value, dict((k.replace('_', '-'), v)
+                                            for k, v in kw.items()))
+
+
+class Headers(object):
+    """An object that stores some headers.  It has a dict-like interface
+    but is ordered and can store the same keys multiple times.
+
+    This data structure is useful if you want a nicer way to handle WSGI
+    headers which are stored as tuples in a list.
+
+    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
+    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
+    and will render a page for a ``400 BAD REQUEST`` if caught in a
+    catch-all for HTTP exceptions.
+
+    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
+    class, with the exception of `__getitem__`.  :mod:`wsgiref` will return
+    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
+    a :class:`KeyError`.
+
+    To create a new :class:`Headers` object pass it a list or dict of headers
+    which are used as default values.  This does not reuse the list passed
+    to the constructor for internal usage.  To create a :class:`Headers`
+    object that uses as internal storage the list or list-like object you
+    can use the :meth:`linked` class method.
+
+    :param defaults: The list of default values for the :class:`Headers`.
+    """
+
+    # the key error this class raises.  Because of circular dependencies
+    # with the http exception module this class is created at the end of
+    # this module.
+    KeyError = None
+
+    def __init__(self, defaults=None, _list=None):
+        if _list is None:
+            _list = []
+        self._list = _list
+        if defaults is not None:
+            if isinstance(defaults, (list, Headers)):
+                self._list.extend(defaults)
+            else:
+                self.extend(defaults)
+
+    @classmethod
+    def linked(cls, headerlist):
+        """Create a new :class:`Headers` object that uses the list of headers
+        passed as internal storage:
+
+        >>> headerlist = [('Content-Length', '40')]
+        >>> headers = Headers.linked(headerlist)
+        >>> headers['Content-Type'] = 'text/html'
+        >>> headerlist
+        [('Content-Length', '40'), ('Content-Type', 'text/html')]
+
+        :param headerlist: The list of headers the class is linked to.
+        :return: new linked :class:`Headers` object.
+        """
+        return cls(_list=headerlist)
+
+    def __getitem__(self, key, _index_operation=True):
+        if _index_operation:
+            if isinstance(key, (int, long)):
+                return self._list[key]
+            elif isinstance(key, slice):
+                return self.__class__(self._list[key])
+        ikey = key.lower()
+        for k, v in self._list:
+            if k.lower() == ikey:
+                return v
+        raise self.KeyError(key)
+
+    def __eq__(self, other):
+        return other.__class__ is self.__class__ and \
+               set(other._list) == set(self._list)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def get(self, key, default=None, type=None):
+        """Return the default value if the requested data doesn't exist.
+        If `type` is provided and is a callable it should convert the value,
+        return it or raise a :exc:`ValueError` if that is not possible.  In
+        this case the function will return the default as if the value was not
+        found:
+
+        >>> d = Headers([('Content-Length', '42')])
+        >>> d.get('Content-Length', type=int)
+        42
+
+        If a headers object is bound you must not add unicode strings
+        because no encoding takes place.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key can't
+                        be looked up.  If not further specified `None` is
+                        returned.
+        :param type: A callable that is used to cast the value in the
+                     :class:`Headers`.  If a :exc:`ValueError` is raised
+                     by this callable the default value is returned.
+        """
+        try:
+            rv = self.__getitem__(key, _index_operation=False)
+        except KeyError:
+            return default
+        if type is None:
+            return rv
+        try:
+            return type(rv)
+        except ValueError:
+            return default
+
+    def getlist(self, key, type=None):
+        """Return the list of items for a given key. If that key is not in the
+        :class:`Headers`, the return value will be an empty list.  Just as
+        :meth:`get` :meth:`getlist` accepts a `type` parameter.  All items will
+        be converted with the callable defined there.
+
+        :param key: The key to be looked up.
+        :param type: A callable that is used to cast the value in the
+                     :class:`Headers`.  If a :exc:`ValueError` is raised
+                     by this callable the value will be removed from the list.
+        :return: a :class:`list` of all the values for the key.
+        """
+        ikey = key.lower()
+        result = []
+        for k, v in self:
+            if k.lower() == ikey:
+                if type is not None:
+                    try:
+                        v = type(v)
+                    except ValueError:
+                        continue
+                result.append(v)
+        return result
+
+    def get_all(self, name):
+        """Return a list of all the values for the named field.
+
+        This method is compatible with the :mod:`wsgiref`
+        :meth:`~wsgiref.headers.Headers.get_all` method.
+        """
+        return self.getlist(name)
+
+    def iteritems(self, lower=False):
+        for key, value in self:
+            if lower:
+                key = key.lower()
+            yield key, value
+
+    def iterkeys(self, lower=False):
+        for key, _ in self.iteritems(lower):
+            yield key
+
+    def itervalues(self):
+        for _, value in self.iteritems():
+            yield value
+
+    def keys(self, lower=False):
+        return list(self.iterkeys(lower))
+
+    def values(self):
+        return list(self.itervalues())
+
+    def items(self, lower=False):
+        return list(self.iteritems(lower))
+
+    def extend(self, iterable):
+        """Extend the headers with a dict or an iterable yielding keys and
+        values.
+        """
+        if isinstance(iterable, dict):
+            for key, value in iterable.iteritems():
+                if isinstance(value, (tuple, list)):
+                    for v in value:
+                        self.add(key, v)
+                else:
+                    self.add(key, value)
+        else:
+            for key, value in iterable:
+                self.add(key, value)
+
+    def __delitem__(self, key, _index_operation=True):
+        if _index_operation and isinstance(key, (int, long, slice)):
+            del self._list[key]
+            return
+        key = key.lower()
+        new = []
+        for k, v in self._list:
+            if k.lower() != key:
+                new.append((k, v))
+        self._list[:] = new
+
+    def remove(self, key):
+        """Remove a key.
+
+        :param key: The key to be removed.
+        """
+        return self.__delitem__(key, _index_operation=False)
+
+    def pop(self, key=None, default=_missing):
+        """Removes and returns a key or index.
+
+        :param key: The key to be popped.  If this is an integer the item at
+                    that position is removed, if it's a string the value for
+                    that key is.  If the key is omitted or `None` the last
+                    item is removed.
+        :return: an item.
+        """
+        if key is None:
+            return self._list.pop()
+        if isinstance(key, (int, long)):
+            return self._list.pop(key)
+        try:
+            rv = self[key]
+            self.remove(key)
+        except KeyError:
+            if default is not _missing:
+                return default
+            raise
+        return rv
+
+    def popitem(self):
+        """Removes a key or index and returns a (key, value) item."""
+        return self.pop()
+
+    def __contains__(self, key):
+        """Check if a key is present."""
+        try:
+            self.__getitem__(key, _index_operation=False)
+        except KeyError:
+            return False
+        return True
+
+    has_key = __contains__
+
+    def __iter__(self):
+        """Yield ``(key, value)`` tuples."""
+        return iter(self._list)
+
+    def __len__(self):
+        return len(self._list)
+
+    def add(self, _key, _value, **kw):
+        """Add a new header tuple to the list.
+
+        Keyword arguments can specify additional parameters for the header
+        value, with underscores converted to dashes::
+
+        >>> d = Headers()
+        >>> d.add('Content-Type', 'text/plain')
+        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')
+
+        The keyword argument dumping uses :func:`dump_options_header`
+        behind the scenes.
+
+        .. versionadded:: 0.4.1
+            keyword arguments were added for :mod:`wsgiref` compatibility.
+        """
+        self._list.append((_key, _options_header_vkw(_value, kw)))
+
+    def add_header(self, _key, _value, **_kw):
+        """Add a new header tuple to the list.
+
+        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
+        :meth:`~wsgiref.headers.Headers.add_header` method.
+        """
+        self.add(_key, _value, **_kw)
+
+    def clear(self):
+        """Clears all headers."""
+        del self._list[:]
+
+    def set(self, _key, _value, **kw):
+        """Remove all header tuples for `key` and add a new one.  The newly
+        added key either appears at the end of the list if there was no
+        entry or replaces the first one.
+
+        Keyword arguments can specify additional parameters for the header
+        value, with underscores converted to dashes.  See :meth:`add` for
+        more information.
+
+        .. versionchanged:: 0.6.1
+           :meth:`set` now accepts the same arguments as :meth:`add`.
+
+        :param key: The key to be inserted.
+        :param value: The value to be inserted.
+        """
+        lc_key = _key.lower()
+        _value = _options_header_vkw(_value, kw)
+        for idx, (old_key, old_value) in enumerate(self._list):
+            if old_key.lower() == lc_key:
+                # replace first occurrence
+                self._list[idx] = (_key, _value)
+                break
+        else:
+            return self.add(_key, _value)
+        self._list[idx + 1:] = [(k, v) for k, v in self._list[idx + 1:]
+                                if k.lower() != lc_key]
+
+    def setdefault(self, key, value):
+        """Returns the value for the key if it is in the dict, otherwise it
+        returns `default` and sets that value for `key`.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key is not
+                        in the dict.  If not further specified it's `None`.
+        """
+        if key in self:
+            return self[key]
+        self.set(key, value)
+        return value
+
+    def __setitem__(self, key, value):
+        """Like :meth:`set` but also supports index/slice based setting."""
+        if isinstance(key, (slice, int, long)):
+            self._list[key] = value
+        else:
+            self.set(key, value)
+
+    def to_list(self, charset='utf-8'):
+        """Convert the headers into a list and converts the unicode header
+        items to the specified charset.
+
+        :return: list
+        """
+        result = []
+        for k, v in self:
+            if isinstance(v, unicode):
+                v = v.encode(charset)
+            else:
+                v = str(v)
+            result.append((k, v))
+        return result
+
+    def copy(self):
+        return self.__class__(self._list)
+
+    def __copy__(self):
+        return self.copy()
+
+    def __str__(self, charset='utf-8'):
+        """Returns formatted headers suitable for HTTP transmission."""
+        strs = []
+        for key, value in self.to_list(charset):
+            strs.append('%s: %s' % (key, value))
+        strs.append('\r\n')
+        return '\r\n'.join(strs)
+
+    def __repr__(self):
+        return '%s(%r)' % (
+            self.__class__.__name__,
+            list(self)
+        )
+
+
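+# Basic behaviour sketch (illustrative): duplicate header names are kept and
+# rendered in insertion order, e.g.
+#
+#     h = Headers()
+#     h.add('Set-Cookie', 'a=1')
+#     h.add('Set-Cookie', 'b=2')
+#     h.get_all('Set-Cookie')   # -> ['a=1', 'b=2']
+#     str(h)   # -> 'Set-Cookie: a=1\r\nSet-Cookie: b=2\r\n\r\n'
+
+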
+class ImmutableHeadersMixin(object):
+    """Makes a :class:`Headers` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+    set = __setitem__
+
+    def add(self, item):
+        is_immutable(self)
+    remove = add_header = add
+
+    def extend(self, iterable):
+        is_immutable(self)
+
+    def insert(self, pos, value):
+        is_immutable(self)
+
+    def pop(self, index=-1):
+        is_immutable(self)
+
+    def popitem(self):
+        is_immutable(self)
+
+    def setdefault(self, key, default):
+        is_immutable(self)
+
+
+class EnvironHeaders(ImmutableHeadersMixin, Headers):
+    """Read only version of the headers from a WSGI environment.  This
+    provides the same interface as `Headers` and is constructed from
+    a WSGI environment.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
+    HTTP exceptions.
+    """
+
+    def __init__(self, environ):
+        self.environ = environ
+
+    @classmethod
+    def linked(cls, environ):
+        raise TypeError('%r object is always linked to environment, '
+                        'no separate initializer' % cls.__name__)
+
+    def __eq__(self, other):
+        return self.environ is other.environ
+
+    def __getitem__(self, key, _index_operation=False):
+        # _index_operation is a no-op for this class as there is no index but
+        # used because get() calls it.
+        key = key.upper().replace('-', '_')
+        if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
+            return self.environ[key]
+        return self.environ['HTTP_' + key]
+
+    def __len__(self):
+        # the iter is necessary because otherwise list calls our
+        # len which would call list again and so forth.
+        return len(list(iter(self)))
+
+    def __iter__(self):
+        for key, value in self.environ.iteritems():
+            if key.startswith('HTTP_') and key not in \
+               ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
+                yield key[5:].replace('_', '-').title(), value
+            elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
+                yield key.replace('_', '-').title(), value
+
+    def copy(self):
+        raise TypeError('cannot create %r copies' % self.__class__.__name__)
+
+
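+# Lookup sketch (illustrative): header names are mapped onto the WSGI
+# environ keys, e.g.
+#
+#     environ = {'CONTENT_TYPE': 'text/html',
+#                'HTTP_X_FORWARDED_FOR': '1.2.3.4'}
+#     headers = EnvironHeaders(environ)
+#     headers['X-Forwarded-For']   # -> '1.2.3.4'
+#     headers['Content-Type']      # -> 'text/html'
+
+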
+class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
+    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
+    instances as sequence and it will combine the return values of all wrapped
+    dicts:
+
+    >>> from werkzeug import MultiDict, CombinedMultiDict
+    >>> post = MultiDict([('foo', 'bar')])
+    >>> get = MultiDict([('blub', 'blah')])
+    >>> combined = CombinedMultiDict([get, post])
+    >>> combined['foo']
+    'bar'
+    >>> combined['blub']
+    'blah'
+
+    This works for all read operations and will raise a `TypeError` for
+    methods that usually change data which isn't possible.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
+    exceptions.
+    """
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (self.dicts,)
+
+    def __init__(self, dicts=None):
+        self.dicts = dicts or []
+
+    @classmethod
+    def fromkeys(cls):
+        raise TypeError('cannot create %r instances by fromkeys' %
+                        cls.__name__)
+
+    def __getitem__(self, key):
+        for d in self.dicts:
+            if key in d:
+                return d[key]
+        raise self.KeyError(key)
+
+    def get(self, key, default=None, type=None):
+        for d in self.dicts:
+            if key in d:
+                if type is not None:
+                    try:
+                        return type(d[key])
+                    except ValueError:
+                        continue
+                return d[key]
+        return default
+
+    def getlist(self, key, type=None):
+        rv = []
+        for d in self.dicts:
+            rv.extend(d.getlist(key, type))
+        return rv
+
+    def keys(self):
+        rv = set()
+        for d in self.dicts:
+            rv.update(d.keys())
+        return list(rv)
+
+    def iteritems(self, multi=False):
+        found = set()
+        for d in self.dicts:
+            for key, value in d.iteritems(multi):
+                if multi:
+                    yield key, value
+                elif key not in found:
+                    found.add(key)
+                    yield key, value
+
+    def itervalues(self):
+        for key, value in self.iteritems():
+            yield value
+
+    def values(self):
+        return list(self.itervalues())
+
+    def items(self, multi=False):
+        return list(self.iteritems(multi))
+
+    def iterlists(self):
+        rv = {}
+        for d in self.dicts:
+            for key, values in d.iterlists():
+                rv.setdefault(key, []).extend(values)
+        return rv.iteritems()
+
+    def lists(self):
+        return list(self.iterlists())
+
+    def iterlistvalues(self):
+        return (x[1] for x in self.lists())
+
+    def listvalues(self):
+        return list(self.iterlistvalues())
+
+    def iterkeys(self):
+        return iter(self.keys())
+
+    __iter__ = iterkeys
+
+    def copy(self):
+        """Return a shallow copy of this object."""
+        return self.__class__(self.dicts[:])
+
+    def to_dict(self, flat=True):
+        """Return the contents as regular dict.  If `flat` is `True` the
+        returned dict will only have the first item present, if `flat` is
+        `False` all values will be returned as lists.
+
+        :param flat: If set to `False` the dict returned will have lists
+                     with all the values in it.  Otherwise it will only
+                     contain the first item for each key.
+        :return: a :class:`dict`
+        """
+        rv = {}
+        for d in reversed(self.dicts):
+            rv.update(d.to_dict(flat))
+        return rv
+
+    def __len__(self):
+        return len(self.keys())
+
+    def __contains__(self, key):
+        for d in self.dicts:
+            if key in d:
+                return True
+        return False
+
+    has_key = __contains__
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.dicts)
+
+
+class FileMultiDict(MultiDict):
+    """A special :class:`MultiDict` that has convenience methods to add
+    files to it.  This is used for :class:`EnvironBuilder` and generally
+    useful for unittesting.
+
+    .. versionadded:: 0.5
+    """
+
+    def add_file(self, name, file, filename=None, content_type=None):
+        """Adds a new file to the dict.  `file` can be a file name or
+        a :class:`file`-like or a :class:`FileStorage` object.
+
+        :param name: the name of the field.
+        :param file: a filename or :class:`file`-like object
+        :param filename: an optional filename
+        :param content_type: an optional content type
+        """
+        if isinstance(file, FileStorage):
+            self[name] = file
+            return
+        if isinstance(file, basestring):
+            if filename is None:
+                filename = file
+            file = open(file, 'rb')
+        if filename and content_type is None:
+            content_type = mimetypes.guess_type(filename)[0] or \
+                           'application/octet-stream'
+        self[name] = FileStorage(file, filename, name, content_type)
+
+
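+# Usage sketch (illustrative; the file object is a stand-in, not a real
+# upload):
+#
+#     from StringIO import StringIO
+#     files = FileMultiDict()
+#     files.add_file('avatar', StringIO('fake image data'),
+#                    filename='avatar.png')
+#     files['avatar'].content_type   # -> 'image/png'
+
+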
+class ImmutableDict(ImmutableDictMixin, dict):
+    """An immutable :class:`dict`.
+
+    .. versionadded:: 0.5
+    """
+
+    __repr__ = _proxy_repr(dict)
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other Python immutable type (e.g. :class:`tuple`).
+        """
+        return dict(self)
+
+    def __copy__(self):
+        return self
+
+
+class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
+    """An immutable :class:`MultiDict`.
+
+    .. versionadded:: 0.5
+    """
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other Python immutable type (e.g. :class:`tuple`).
+        """
+        return MultiDict(self)
+
+    def __copy__(self):
+        return self
+
+
+class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
+    """An immutable :class:`OrderedMultiDict`.
+
+    .. versionadded:: 0.6
+    """
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other Python immutable type (e.g. :class:`tuple`).
+        """
+        return OrderedMultiDict(self)
+
+    def __copy__(self):
+        return self
+
+
+class Accept(ImmutableList):
+    """An :class:`Accept` object is just a list subclass for lists of
+    ``(value, quality)`` tuples.  It is automatically sorted by quality.
+
+    All :class:`Accept` objects work similarly to a list but provide extra
+    functionality for working with the data.  Containment checks are
+    normalized to the rules of that header:
+
+    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
+    >>> a.best
+    'ISO-8859-1'
+    >>> 'iso-8859-1' in a
+    True
+    >>> 'UTF8' in a
+    True
+    >>> 'utf7' in a
+    False
+
+    To get the quality for an item you can use normal item lookup:
+
+    >>> print a['utf-8']
+    0.7
+    >>> a['utf7']
+    0
+
+    .. versionchanged:: 0.5
+       :class:`Accept` objects are forced immutable now.
+    """
+
+    def __init__(self, values=()):
+        if values is None:
+            list.__init__(self)
+            self.provided = False
+        elif isinstance(values, Accept):
+            self.provided = values.provided
+            list.__init__(self, values)
+        else:
+            self.provided = True
+            values = [(a, b) for b, a in values]
+            values.sort()
+            values.reverse()
+            list.__init__(self, [(a, b) for b, a in values])
+
+    def _value_matches(self, value, item):
+        """Check if a value matches a given accept item."""
+        return item == '*' or item.lower() == value.lower()
+
+    def __getitem__(self, key):
+        """Besides index lookup (getting item n) you can also pass it a string
+        to get the quality for the item.  If the item is not in the list, the
+        returned quality is ``0``.
+        """
+        if isinstance(key, basestring):
+            return self.quality(key)
+        return list.__getitem__(self, key)
+
+    def quality(self, key):
+        """Returns the quality of the key.
+
+        .. versionadded:: 0.6
+           In previous versions you had to use the item-lookup syntax
+           (e.g. ``obj[key]`` instead of ``obj.quality(key)``).
+        """
+        for item, quality in self:
+            if self._value_matches(key, item):
+                return quality
+        return 0
+
+    def __contains__(self, value):
+        for item, quality in self:
+            if self._value_matches(value, item):
+                return True
+        return False
+
+    def __repr__(self):
+        return '%s([%s])' % (
+            self.__class__.__name__,
+            ', '.join('(%r, %s)' % (x, y) for x, y in self)
+        )
+
+    def index(self, key):
+        """Get the position of an entry or raise :exc:`ValueError`.
+
+        :param key: The key to be looked up.
+
+        .. versionchanged:: 0.5
+           This used to raise :exc:`IndexError`, which was inconsistent
+           with the list API.
+        """
+        if isinstance(key, basestring):
+            for idx, (item, quality) in enumerate(self):
+                if self._value_matches(key, item):
+                    return idx
+            raise ValueError(key)
+        return list.index(self, key)
+
+    def find(self, key):
+        """Get the position of an entry or return -1.
+
+        :param key: The key to be looked up.
+        """
+        try:
+            return self.index(key)
+        except ValueError:
+            return -1
+
+    def values(self):
+        """Return a list of the values, not the qualities."""
+        return list(self.itervalues())
+
+    def itervalues(self):
+        """Iterate over all values."""
+        for item in self:
+            yield item[0]
+
+    def to_header(self):
+        """Convert the header set into an HTTP header string."""
+        result = []
+        for value, quality in self:
+            if quality != 1:
+                value = '%s;q=%s' % (value, quality)
+            result.append(value)
+        return ','.join(result)
+
+    def __str__(self):
+        return self.to_header()
+
+    def best_match(self, matches, default=None):
+        """Returns the best match from a list of possible matches based
+        on the quality of the client.  If two items have the same quality,
+        the one is returned that comes first.
+
+        :param matches: a list of matches to check for
+        :param default: the value that is returned if none match
+        """
+        best_quality = -1
+        result = default
+        for server_item in matches:
+            for client_item, quality in self:
+                if quality <= best_quality:
+                    break
+                if self._value_matches(client_item, server_item):
+                    best_quality = quality
+                    result = server_item
+        return result
+
+    @property
+    def best(self):
+        """The best match as value."""
+        if self:
+            return self[0][0]
+
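+# A short sketch of how quality-based matching behaves, using
+# :class:`CharsetAccept` (the qualities below are made up for illustration):
+#
+#     accept = CharsetAccept([('utf-8', 1), ('iso-8859-1', 0.5)])
+#     accept.quality('ISO-8859-1')                    # 0.5 (case insensitive)
+#     accept.best_match(['iso-8859-1', 'utf-8'])      # 'utf-8'
+#     accept.best_match(['utf-16'], default='utf-8')  # 'utf-8' (no match)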
+
+class MIMEAccept(Accept):
+    """Like :class:`Accept` but with special methods and behavior for
+    mimetypes.
+    """
+
+    def _value_matches(self, value, item):
+        def _normalize(x):
+            x = x.lower()
+            return x == '*' and ('*', '*') or x.split('/', 1)
+
+        # the value comes from the application and is trusted.  to avoid
+        # developer frustration we still check it for validity
+        if '/' not in value:
+            raise ValueError('invalid mimetype %r' % value)
+        value_type, value_subtype = _normalize(value)
+        if value_type == '*' and value_subtype != '*':
+            raise ValueError('invalid mimetype %r' % value)
+
+        if '/' not in item:
+            return False
+        item_type, item_subtype = _normalize(item)
+        if item_type == '*' and item_subtype != '*':
+            return False
+        return (
+            (item_type == item_subtype == '*' or
+             value_type == value_subtype == '*') or
+            (item_type == value_type and (item_subtype == '*' or
+                                          value_subtype == '*' or
+                                          item_subtype == value_subtype))
+        )
+
+    @property
+    def accept_html(self):
+        """True if this object accepts HTML."""
+        return (
+            'text/html' in self or
+            'application/xhtml+xml' in self or
+            self.accept_xhtml
+        )
+
+    @property
+    def accept_xhtml(self):
+        """True if this object accepts XHTML."""
+        return (
+            'application/xhtml+xml' in self or
+            'application/xml' in self
+        )
+
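+# A small sketch of mimetype matching (values chosen for illustration):
+# a ``*/*`` entry matches any offered type, while concrete entries must
+# agree on type and subtype:
+#
+#     mimes = MIMEAccept([('text/html', 1), ('application/xml', 0.9),
+#                         ('*/*', 0.1)])
+#     'text/html' in mimes                                  # True
+#     mimes.best_match(['application/json', 'text/html'])   # 'text/html'
+#     mimes.accept_html                                      # True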
+
+class LanguageAccept(Accept):
+    """Like :class:`Accept` but with normalization for languages."""
+
+    def _value_matches(self, value, item):
+        def _normalize(language):
+            return _locale_delim_re.split(language.lower())
+        return item == '*' or _normalize(value) == _normalize(item)
+
+
+class CharsetAccept(Accept):
+    """Like :class:`Accept` but with normalization for charsets."""
+
+    def _value_matches(self, value, item):
+        def _normalize(name):
+            try:
+                return codecs.lookup(name).name
+            except LookupError:
+                return name.lower()
+        return item == '*' or _normalize(value) == _normalize(item)
+
+
+def cache_property(key, empty, type):
+    """Return a new property object for a cache header.  Useful if you
+    want to add support for a cache extension in a subclass."""
+    return property(lambda x: x._get_cache_value(key, empty, type),
+                    lambda x, v: x._set_cache_value(key, v, type),
+                    lambda x: x._del_cache_value(key),
+                    'accessor for %r' % key)
+
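+# Sketch of adding a cache extension in a subclass with cache_property
+# (``stale-while-revalidate`` is only an illustrative directive; the
+# ``ResponseCacheControl`` class is defined further below):
+#
+#     class ExtendedCacheControl(ResponseCacheControl):
+#         stale_while_revalidate = cache_property('stale-while-revalidate',
+#                                                 None, int)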
+
+class _CacheControl(UpdateDictMixin, dict):
+    """Subclass of a dict that stores values for a Cache-Control header.  It
+    has accessors for all the cache-control directives specified in RFC 2616.
+    The class does not differentiate between request and response directives.
+
+    Because the cache-control directives in the HTTP header use dashes, the
+    Python descriptors use underscores instead.
+
+    To get a header of the :class:`CacheControl` object again you can convert
+    the object into a string or call the :meth:`to_header` method.  If you plan
+    to subclass it and add your own items have a look at the sourcecode for
+    that class.
+
+    .. versionchanged:: 0.4
+
+       Setting `no_cache` or `private` to boolean `True` will set the
+       implicit none-value, which is reported as ``*`` when read back:
+
+       >>> cc = ResponseCacheControl()
+       >>> cc.no_cache = True
+       >>> cc
+       <ResponseCacheControl 'no-cache'>
+       >>> cc.no_cache
+       '*'
+       >>> cc.no_cache = None
+       >>> cc
+       <ResponseCacheControl ''>
+
+       In versions before 0.5 the behavior documented here affected the now
+       no longer existing `CacheControl` class.
+    """
+
+    no_cache = cache_property('no-cache', '*', None)
+    no_store = cache_property('no-store', None, bool)
+    max_age = cache_property('max-age', -1, int)
+    no_transform = cache_property('no-transform', None, None)
+
+    def __init__(self, values=(), on_update=None):
+        dict.__init__(self, values or ())
+        self.on_update = on_update
+        self.provided = values is not None
+
+    def _get_cache_value(self, key, empty, type):
+        """Used internally by the accessor properties."""
+        if type is bool:
+            return key in self
+        if key in self:
+            value = self[key]
+            if value is None:
+                return empty
+            elif type is not None:
+                try:
+                    value = type(value)
+                except ValueError:
+                    pass
+            return value
+
+    def _set_cache_value(self, key, value, type):
+        """Used internally by the accessor properties."""
+        if type is bool:
+            if value:
+                self[key] = None
+            else:
+                self.pop(key, None)
+        else:
+            if value is None:
+                self.pop(key, None)
+            elif value is True:
+                self[key] = None
+            else:
+                self[key] = value
+
+    def _del_cache_value(self, key):
+        """Used internally by the accessor properties."""
+        if key in self:
+            del self[key]
+
+    def to_header(self):
+        """Convert the stored values into a cache control header."""
+        return dump_header(self)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.to_header()
+        )
+
+
+class RequestCacheControl(ImmutableDictMixin, _CacheControl):
+    """A cache control for requests.  This is immutable and gives access
+    to all the request-relevant cache control headers.
+
+    To get a header of the :class:`RequestCacheControl` object again you can
+    convert the object into a string or call the :meth:`to_header` method.  If
+    you plan to subclass it and add your own items have a look at the sourcecode
+    for that class.
+
+    .. versionadded:: 0.5
+       In previous versions a `CacheControl` class existed that was used
+       both for request and response.
+    """
+
+    max_stale = cache_property('max-stale', '*', int)
+    min_fresh = cache_property('min-fresh', '*', int)
+    no_transform = cache_property('no-transform', None, None)
+    only_if_cached = cache_property('only-if-cached', None, bool)
+
+
+class ResponseCacheControl(_CacheControl):
+    """A cache control for responses.  Unlike :class:`RequestCacheControl`
+    this is mutable and gives access to response-relevant cache control
+    headers.
+
+    To get a header of the :class:`ResponseCacheControl` object again you can
+    convert the object into a string or call the :meth:`to_header` method.  If
+    you plan to subclass it and add your own items have a look at the sourcecode
+    for that class.
+
+    .. versionadded:: 0.5
+       In previous versions a `CacheControl` class existed that was used
+       both for request and response.
+    """
+
+    public = cache_property('public', None, bool)
+    private = cache_property('private', '*', None)
+    must_revalidate = cache_property('must-revalidate', None, bool)
+    proxy_revalidate = cache_property('proxy-revalidate', None, bool)
+    s_maxage = cache_property('s-maxage', None, None)
+
+
+# attach cache_property to the _CacheControl as staticmethod
+# so that others can reuse it.
+_CacheControl.cache_property = staticmethod(cache_property)
+
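+# A minimal sketch of building a Cache-Control header with
+# :class:`ResponseCacheControl` (directive order in the output may vary):
+#
+#     cc = ResponseCacheControl()
+#     cc.max_age = 3600
+#     cc.must_revalidate = True
+#     cc.to_header()      # e.g. 'must-revalidate, max-age=3600'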
+
+class CallbackDict(UpdateDictMixin, dict):
+    """A dict that calls a function passed every time something is changed.
+    The function is passed the dict instance.
+    """
+
+    def __init__(self, initial=None, on_update=None):
+        dict.__init__(self, initial or ())
+        self.on_update = on_update
+
+    def __repr__(self):
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            dict.__repr__(self)
+        )
+
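+# A small sketch of the on_update callback; any mutating operation triggers
+# it with the dict instance as the only argument:
+#
+#     def changed(d):
+#         print 'dict changed: %r' % dict(d)
+#
+#     d = CallbackDict({'a': 1}, on_update=changed)
+#     d['b'] = 2          # the assignment triggers changed(d)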
+
+class HeaderSet(object):
+    """Similar to the :class:`ETags` class this implements a set-like structure.
+    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
+    content-language headers.
+
+    If not constructed using the :func:`parse_set_header` function the
+    instantiation works like this:
+
+    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
+    >>> hs
+    HeaderSet(['foo', 'bar', 'baz'])
+    """
+
+    def __init__(self, headers=None, on_update=None):
+        self._headers = list(headers or ())
+        self._set = set([x.lower() for x in self._headers])
+        self.on_update = on_update
+
+    def add(self, header):
+        """Add a new header to the set."""
+        self.update((header,))
+
+    def remove(self, header):
+        """Remove a header from the set.  This raises an :exc:`KeyError` if the
+        header is not in the set.
+
+        .. versionchanged:: 0.5
+            In older versions an :exc:`IndexError` was raised instead of a
+            :exc:`KeyError` if the object was missing.
+
+        :param header: the header to be removed.
+        """
+        key = header.lower()
+        if key not in self._set:
+            raise KeyError(header)
+        self._set.remove(key)
+        for idx, stored_header in enumerate(self._headers):
+            # compare against the lowercased key so that removing a header
+            # with different casing also updates the ordered list
+            if stored_header.lower() == key:
+                del self._headers[idx]
+                break
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def update(self, iterable):
+        """Add all the headers from the iterable to the set.
+
+        :param iterable: updates the set with the items from the iterable.
+        """
+        inserted_any = False
+        for header in iterable:
+            key = header.lower()
+            if key not in self._set:
+                self._headers.append(header)
+                self._set.add(key)
+                inserted_any = True
+        if inserted_any and self.on_update is not None:
+            self.on_update(self)
+
+    def discard(self, header):
+        """Like :meth:`remove` but ignores errors.
+
+        :param header: the header to be discarded.
+        """
+        try:
+            return self.remove(header)
+        except KeyError:
+            pass
+
+    def find(self, header):
+        """Return the index of the header in the set or return -1 if not found.
+
+        :param header: the header to be looked up.
+        """
+        header = header.lower()
+        for idx, item in enumerate(self._headers):
+            if item.lower() == header:
+                return idx
+        return -1
+
+    def index(self, header):
+        """Return the index of the header in the set or raise an
+        :exc:`IndexError`.
+
+        :param header: the header to be looked up.
+        """
+        rv = self.find(header)
+        if rv < 0:
+            raise IndexError(header)
+        return rv
+
+    def clear(self):
+        """Clear the set."""
+        self._set.clear()
+        del self._headers[:]
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def as_set(self, preserve_casing=False):
+        """Return the set as real python set type.  When calling this, all
+        the items are converted to lowercase and the ordering is lost.
+
+        :param preserve_casing: if set to `True` the items in the set returned
+                                will have the original case like in the
+                                :class:`HeaderSet`, otherwise they will
+                                be lowercase.
+        """
+        if preserve_casing:
+            return set(self._headers)
+        return set(self._set)
+
+    def to_header(self):
+        """Convert the header set into an HTTP header string."""
+        return ', '.join(map(quote_header_value, self._headers))
+
+    def __getitem__(self, idx):
+        return self._headers[idx]
+
+    def __delitem__(self, idx):
+        rv = self._headers.pop(idx)
+        self._set.remove(rv.lower())
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def __setitem__(self, idx, value):
+        old = self._headers[idx]
+        self._set.remove(old.lower())
+        self._headers[idx] = value
+        self._set.add(value.lower())
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def __contains__(self, header):
+        return header.lower() in self._set
+
+    def __len__(self):
+        return len(self._set)
+
+    def __iter__(self):
+        return iter(self._headers)
+
+    def __nonzero__(self):
+        return bool(self._set)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return '%s(%r)' % (
+            self.__class__.__name__,
+            self._headers
+        )
+
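+# A short sketch of HeaderSet semantics: lookups are case insensitive while
+# the original casing and insertion order are kept for the header value:
+#
+#     hs = HeaderSet(['Content-Type'])
+#     hs.add('content-type')      # duplicate (case insensitive), ignored
+#     hs.add('Content-Length')
+#     'CONTENT-LENGTH' in hs      # True
+#     hs.to_header()              # 'Content-Type, Content-Length'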
+
+class ETags(object):
+    """A set that can be used to check if one etag is present in a collection
+    of etags.
+    """
+
+    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
+        self._strong = frozenset(not star_tag and strong_etags or ())
+        self._weak = frozenset(weak_etags or ())
+        self.star_tag = star_tag
+
+    def as_set(self, include_weak=False):
+        """Convert the `ETags` object into a python set.  Per default all the
+        weak etags are not part of this set."""
+        rv = set(self._strong)
+        if include_weak:
+            rv.update(self._weak)
+        return rv
+
+    def is_weak(self, etag):
+        """Check if an etag is weak."""
+        return etag in self._weak
+
+    def contains_weak(self, etag):
+        """Check if an etag is part of the set including weak and strong tags."""
+        return self.is_weak(etag) or self.contains(etag)
+
+    def contains(self, etag):
+        """Check if an etag is part of the set ignoring weak tags."""
+        if self.star_tag:
+            return True
+        return etag in self._strong
+
+    def contains_raw(self, etag):
+        """When passed a quoted tag it will check if this tag is part of the
+        set.  If the tag is weak it is checked against weak and strong tags,
+        otherwise strong only."""
+        etag, weak = unquote_etag(etag)
+        if weak:
+            return self.contains_weak(etag)
+        return self.contains(etag)
+
+    def to_header(self):
+        """Convert the etags set into a HTTP header string."""
+        if self.star_tag:
+            return '*'
+        return ', '.join(
+            ['"%s"' % x for x in self._strong] +
+            ['w/"%s"' % x for x in self._weak]
+        )
+
+    def __call__(self, etag=None, data=None, include_weak=False):
+        if [etag, data].count(None) != 1:
+            raise TypeError('exactly one of etag or data must be provided')
+        if etag is None:
+            etag = generate_etag(data)
+        if include_weak:
+            if etag in self._weak:
+                return True
+        return etag in self._strong
+
+    def __nonzero__(self):
+        return bool(self.star_tag or self._strong)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __iter__(self):
+        return iter(self._strong)
+
+    def __contains__(self, etag):
+        return self.contains(etag)
+
+    def __repr__(self):
+        return '<%s %r>' % (self.__class__.__name__, str(self))
+
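+# A minimal sketch of ETags matching, e.g. for evaluating an If-None-Match
+# header (the tag values are made up; the order of strong tags produced by
+# to_header() is not defined):
+#
+#     etags = ETags(['abc'], weak_etags=['xyz'])
+#     'abc' in etags                 # True  (strong comparison)
+#     etags.contains_weak('xyz')     # True
+#     etags.contains_raw('W/"xyz"')  # True  (quoted form as sent on the wire)
+#     etags.to_header()              # '"abc", W/"xyz"'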
+
+class Authorization(ImmutableDictMixin, dict):
+    """Represents an `Authorization` header sent by the client.  You should
+    not create this kind of object yourself but use it when it's returned by
+    the `parse_authorization_header` function.
+
+    This object is a dict subclass, but it should be considered immutable:
+    it represents data sent by the client and is not meant to be modified
+    by the application.
+
+    .. versionchanged:: 0.5
+       This object became immutable.
+    """
+
+    def __init__(self, auth_type, data=None):
+        dict.__init__(self, data or {})
+        self.type = auth_type
+
+    username = property(lambda x: x.get('username'), doc='''
+        The username transmitted.  This is set for both basic and digest
+        auth all the time.''')
+    password = property(lambda x: x.get('password'), doc='''
+        When the authentication type is basic this is the password
+        transmitted by the client, else `None`.''')
+    realm = property(lambda x: x.get('realm'), doc='''
+        This is the server realm sent back for HTTP digest auth.''')
+    nonce = property(lambda x: x.get('nonce'), doc='''
+        The nonce the server sent for digest auth, sent back by the client.
+        A nonce should be unique for every 401 response for HTTP digest
+        auth.''')
+    uri = property(lambda x: x.get('uri'), doc='''
+        The URI from Request-URI of the Request-Line; duplicated because
+        proxies are allowed to change the Request-Line in transit.  HTTP
+        digest auth only.''')
+    nc = property(lambda x: x.get('nc'), doc='''
+        The nonce count value transmitted by clients if a qop-header is
+        also transmitted.  HTTP digest auth only.''')
+    cnonce = property(lambda x: x.get('cnonce'), doc='''
+        If the server sent a qop-header in the ``WWW-Authenticate``
+        header, the client has to provide this value for HTTP digest auth.
+        See the RFC for more details.''')
+    response = property(lambda x: x.get('response'), doc='''
+        A string of 32 hex digits computed as defined in RFC 2617, which
+        proves that the user knows a password.  Digest auth only.''')
+    opaque = property(lambda x: x.get('opaque'), doc='''
+        The opaque header from the server returned unchanged by the client.
+        It is recommended that this string be base64 or hexadecimal data.
+        Digest auth only.''')
+
+    @property
+    def qop(self):
+        """Indicates what "quality of protection" the client has applied to
+        the message for HTTP digest auth."""
+        def on_update(header_set):
+            if not header_set and 'qop' in self:
+                del self['qop']
+            elif header_set:
+                self['qop'] = header_set.to_header()
+        return parse_set_header(self.get('qop'), on_update)
+
+
+class WWWAuthenticate(UpdateDictMixin, dict):
+    """Provides simple access to `WWW-Authenticate` headers."""
+
+    #: list of keys that require quoting in the generated header
+    _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
+
+    def __init__(self, auth_type=None, values=None, on_update=None):
+        dict.__init__(self, values or ())
+        if auth_type:
+            self['__auth_type__'] = auth_type
+        self.on_update = on_update
+
+    def set_basic(self, realm='authentication required'):
+        """Clear the auth info and enable basic auth."""
+        dict.clear(self)
+        dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
+        if self.on_update:
+            self.on_update(self)
+
+    def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
+                   algorithm=None, stale=False):
+        """Clear the auth info and enable digest auth."""
+        d = {
+            '__auth_type__':    'digest',
+            'realm':            realm,
+            'nonce':            nonce,
+            'qop':              dump_header(qop)
+        }
+        if stale:
+            d['stale'] = 'TRUE'
+        if opaque is not None:
+            d['opaque'] = opaque
+        if algorithm is not None:
+            d['algorithm'] = algorithm
+        dict.clear(self)
+        dict.update(self, d)
+        if self.on_update:
+            self.on_update(self)
+
+    def to_header(self):
+        """Convert the stored values into a WWW-Authenticate header."""
+        d = dict(self)
+        auth_type = d.pop('__auth_type__', None) or 'basic'
+        return '%s %s' % (auth_type.title(), ', '.join([
+            '%s=%s' % (key, quote_header_value(value,
+                       allow_token=key not in self._require_quoting))
+            for key, value in d.iteritems()
+        ]))
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.to_header()
+        )
+
+    def auth_property(name, doc=None):
+        """A static helper function for subclasses to add extra authentication
+        system properties onto a class::
+
+            class FooAuthenticate(WWWAuthenticate):
+                special_realm = auth_property('special_realm')
+
+        For more information have a look at the sourcecode to see how the
+        regular properties (:attr:`realm` etc.) are implemented.
+        """
+        def _set_value(self, value):
+            if value is None:
+                self.pop(name, None)
+            else:
+                self[name] = str(value)
+        return property(lambda x: x.get(name), _set_value, doc=doc)
+
+    def _set_property(name, doc=None):
+        def fget(self):
+            def on_update(header_set):
+                if not header_set and name in self:
+                    del self[name]
+                elif header_set:
+                    self[name] = header_set.to_header()
+            return parse_set_header(self.get(name), on_update)
+        return property(fget, doc=doc)
+
+    type = auth_property('__auth_type__', doc='''
+        The type of the auth mechanism.  HTTP currently specifies
+        `Basic` and `Digest`.''')
+    realm = auth_property('realm', doc='''
+        A string to be displayed to users so they know which username and
+        password to use.  This string should contain at least the name of
+        the host performing the authentication and might additionally
+        indicate the collection of users who might have access.''')
+    domain = _set_property('domain', doc='''
+        A list of URIs that define the protection space.  If a URI is an
+        absolute path, it is relative to the canonical root URL of the
+        server being accessed.''')
+    nonce = auth_property('nonce', doc='''
+        A server-specified data string which should be uniquely generated
+        each time a 401 response is made.  It is recommended that this
+        string be base64 or hexadecimal data.''')
+    opaque = auth_property('opaque', doc='''
+        A string of data, specified by the server, which should be returned
+        by the client unchanged in the Authorization header of subsequent
+        requests with URIs in the same protection space.  It is recommended
+        that this string be base64 or hexadecimal data.''')
+    algorithm = auth_property('algorithm', doc='''
+        A string indicating a pair of algorithms used to produce the digest
+        and a checksum.  If this is not present it is assumed to be "MD5".
+        If the algorithm is not understood, the challenge should be ignored
+        (and a different one used, if there is more than one).''')
+    qop = _set_property('qop', doc='''
+        A set of quality-of-protection directives such as auth and auth-int.''')
+
+    def _get_stale(self):
+        val = self.get('stale')
+        if val is not None:
+            return val.lower() == 'true'
+    def _set_stale(self, value):
+        if value is None:
+            self.pop('stale', None)
+        else:
+            self['stale'] = value and 'TRUE' or 'FALSE'
+    stale = property(_get_stale, _set_stale, doc='''
+        A flag, indicating that the previous request from the client was
+        rejected because the nonce value was stale.''')
+    del _get_stale, _set_stale
+
+    # make auth_property a staticmethod so that subclasses of
+    # `WWWAuthenticate` can use it for new properties.
+    auth_property = staticmethod(auth_property)
+    del _set_property
+
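+# A minimal sketch of issuing a challenge with :class:`WWWAuthenticate`
+# (the realm string is only an example):
+#
+#     www_auth = WWWAuthenticate()
+#     www_auth.set_basic(realm='login required')
+#     www_auth.to_header()        # 'Basic realm="login required"'
+#
+# set_digest() works the same way but additionally needs a server generated
+# nonce and renders the qop, opaque and algorithm directives as well.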
+
+class FileStorage(object):
+    """The :class:`FileStorage` class is a thin wrapper over incoming files.
+    It is used by the request object to represent uploaded files.  All the
+    attributes of the wrapped stream are proxied by the file storage so
+    it's possible to do ``storage.read()`` instead of the long form
+    ``storage.stream.read()``.
+    """
+
+    def __init__(self, stream=None, filename=None, name=None,
+                 content_type='application/octet-stream', content_length=-1,
+                 headers=None):
+        self.name = name
+        self.stream = stream or _empty_stream
+        self.filename = filename or getattr(stream, 'name', None)
+        self.content_type = content_type
+        self.content_length = content_length
+        if headers is None:
+            headers = Headers()
+        self.headers = headers
+
+    def save(self, dst, buffer_size=16384):
+        """Save the file to a destination path or file object.  If the
+        destination is a file object you have to close it yourself after the
+        call.  The buffer size is the number of bytes held in memory during
+        the copy process.  It defaults to 16KB.
+
+        For secure file saving also have a look at :func:`secure_filename`.
+
+        :param dst: a filename or open file object the uploaded file
+                    is saved to.
+        :param buffer_size: the size of the buffer.  This works the same as
+                            the `length` parameter of
+                            :func:`shutil.copyfileobj`.
+        """
+        from shutil import copyfileobj
+        close_dst = False
+        if isinstance(dst, basestring):
+            dst = file(dst, 'wb')
+            close_dst = True
+        try:
+            copyfileobj(self.stream, dst, buffer_size)
+        finally:
+            if close_dst:
+                dst.close()
+
+    def close(self):
+        """Close the underlying file if possible."""
+        try:
+            self.stream.close()
+        except:
+            pass
+
+    def __nonzero__(self):
+        return bool(self.filename)
+
+    def __getattr__(self, name):
+        return getattr(self.stream, name)
+
+    def __iter__(self):
+        return iter(self.readline, '')
+
+    def __repr__(self):
+        return '<%s: %r (%r)>' % (
+            self.__class__.__name__,
+            self.filename,
+            self.content_type
+        )
+
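+# A small usage sketch: attribute access on a FileStorage falls through to
+# the wrapped stream, and save() copies it to a path or file object (the
+# target path below is only illustrative):
+#
+#     from StringIO import StringIO
+#     fs = FileStorage(StringIO('hello world'), filename='hello.txt')
+#     fs.filename                  # 'hello.txt'
+#     fs.read()                    # 'hello world' (proxied to the stream)
+#     fs.save('/tmp/upload.txt')   # copies the remaining stream contents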
+
+# circular dependencies
+from werkzeug.http import dump_options_header, dump_header, generate_etag, \
+     quote_header_value, parse_set_header, unquote_etag
+
+
+# create all the special key errors now that the classes are defined.
+from werkzeug.exceptions import BadRequest
+for _cls in MultiDict, OrderedMultiDict, CombinedMultiDict, Headers, \
+            EnvironHeaders:
+    _cls.KeyError = BadRequest.wrap(KeyError, _cls.__name__ + '.KeyError')
+del _cls
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/__init__.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug
+    ~~~~~~~~~~~~~~
+
+    WSGI application traceback debugger.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import mimetypes
+from os.path import join, dirname, basename, isfile
+from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
+from werkzeug.debug.repr import debug_repr
+from werkzeug.debug.tbtools import get_current_traceback
+from werkzeug.debug.console import Console
+from werkzeug.debug.utils import render_template
+
+
+class _ConsoleFrame(object):
+    """Helper class so that we can reuse the frame console code for the
+    standalone console.
+    """
+
+    def __init__(self, namespace):
+        self.console = Console(namespace)
+        self.id = 0
+
+
+class DebuggedApplication(object):
+    """Enables debugging support for a given application::
+
+        from werkzeug.debug import DebuggedApplication
+        from myapp import app
+        app = DebuggedApplication(app, evalex=True)
+
+    The `evalex` keyword argument allows evaluating expressions in a
+    traceback's frame context.
+
+    :param app: the WSGI application to run debugged.
+    :param evalex: enable exception evaluation feature (interactive
+                   debugging).  This requires a non-forking server.
+    :param request_key: The key that points to the request object in the
+                        environment.  This parameter is ignored in current
+                        versions.
+    :param console_path: the URL for a general purpose console.
+    :param console_init_func: the function that is executed before starting
+                              the general purpose console.  The return value
+                              is used as initial namespace.
+    :param show_hidden_frames: by default hidden traceback frames are skipped.
+                               You can show them by setting this parameter
+                               to `True`.
+    """
+
+    # this class is public
+    __module__ = 'werkzeug'
+
+    def __init__(self, app, evalex=False, request_key='werkzeug.request',
+                 console_path='/console', console_init_func=None,
+                 show_hidden_frames=False):
+        if not console_init_func:
+            console_init_func = dict
+        self.app = app
+        self.evalex = evalex
+        self.frames = {}
+        self.tracebacks = {}
+        self.request_key = request_key
+        self.console_path = console_path
+        self.console_init_func = console_init_func
+        self.show_hidden_frames = show_hidden_frames
+
+    def debug_application(self, environ, start_response):
+        """Run the application and conserve the traceback frames."""
+        app_iter = None
+        try:
+            app_iter = self.app(environ, start_response)
+            for item in app_iter:
+                yield item
+            if hasattr(app_iter, 'close'):
+                app_iter.close()
+        except:
+            if hasattr(app_iter, 'close'):
+                app_iter.close()
+            traceback = get_current_traceback(skip=1, show_hidden_frames=
+                                              self.show_hidden_frames,
+                                              ignore_system_exceptions=True)
+            for frame in traceback.frames:
+                self.frames[frame.id] = frame
+            self.tracebacks[traceback.id] = traceback
+
+            try:
+                start_response('500 INTERNAL SERVER ERROR', [
+                    ('Content-Type', 'text/html; charset=utf-8')
+                ])
+            except:
+                # if we end up here there has been output but an error
+                # occurred.  in that situation we can do nothing fancy any
+                # more, better log something into the error log and fall
+                # back gracefully.
+                environ['wsgi.errors'].write(
+                    'Debugging middleware caught exception in streamed '
+                    'response at a point where response headers were already '
+                    'sent.\n')
+            else:
+                yield traceback.render_full(evalex=self.evalex) \
+                               .encode('utf-8', 'replace')
+
+            traceback.log(environ['wsgi.errors'])
+
+    def execute_command(self, request, command, frame):
+        """Execute a command in a console."""
+        return Response(frame.console.eval(command), mimetype='text/html')
+
+    def display_console(self, request):
+        """Display a standalone shell."""
+        if 0 not in self.frames:
+            self.frames[0] = _ConsoleFrame(self.console_init_func())
+        return Response(render_template('console.html'), mimetype='text/html')
+
+    def paste_traceback(self, request, traceback):
+        """Paste the traceback and return a JSON response."""
+        paste_id = traceback.paste()
+        return Response('{"url": "http://paste.pocoo.org/show/%s/", "id": %s}'
+                        % (paste_id, paste_id), mimetype='application/json')
+
+    def get_source(self, request, frame):
+        """Render the source viewer."""
+        return Response(frame.render_source(), mimetype='text/html')
+
+    def get_resource(self, request, filename):
+        """Return a static resource from the shared folder."""
+        filename = join(dirname(__file__), 'shared', basename(filename))
+        if isfile(filename):
+            mimetype = mimetypes.guess_type(filename)[0] \
+                or 'application/octet-stream'
+            f = file(filename, 'rb')
+            try:
+                return Response(f.read(), mimetype=mimetype)
+            finally:
+                f.close()
+        return Response('Not Found', status=404)
+
+    def __call__(self, environ, start_response):
+        """Dispatch the requests."""
+        # important: don't ever access a function here that reads the incoming
+        # form data!  Otherwise the application won't have access to that data
+        # any more!
+        request = Request(environ)
+        response = self.debug_application
+        if self.evalex and self.console_path is not None and \
+           request.path == self.console_path:
+            response = self.display_console(request)
+        elif request.path.rstrip('/').endswith('/__debugger__'):
+            cmd = request.args.get('cmd')
+            arg = request.args.get('f')
+            traceback = self.tracebacks.get(request.args.get('tb', type=int))
+            frame = self.frames.get(request.args.get('frm', type=int))
+            if cmd == 'resource' and arg:
+                response = self.get_resource(request, arg)
+            elif cmd == 'paste' and traceback is not None:
+                response = self.paste_traceback(request, traceback)
+            elif cmd == 'source' and frame:
+                response = self.get_source(request, frame)
+            elif self.evalex and cmd is not None and frame is not None:
+                response = self.execute_command(request, cmd, frame)
+        return response(environ, start_response)
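+
+# A minimal sketch of serving a wrapped application with the development
+# server ("make_app" is a placeholder for an application factory):
+#
+#     from werkzeug.serving import run_simple
+#     from werkzeug.debug import DebuggedApplication
+#
+#     application = DebuggedApplication(make_app(), evalex=True)
+#     run_simple('localhost', 5000, application, use_reloader=True)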
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/console.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.console
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Interactive console support.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD.
+"""
+import sys
+import code
+from types import CodeType
+from werkzeug.utils import escape
+from werkzeug.local import Local
+from werkzeug.debug.repr import debug_repr, dump, helper
+from werkzeug.debug.utils import render_template
+
+
+_local = Local()
+
+
+class HTMLStringO(object):
+    """A StringO version that HTML escapes on write."""
+
+    def __init__(self):
+        self._buffer = []
+
+    def isatty(self):
+        return False
+
+    def close(self):
+        pass
+
+    def flush(self):
+        pass
+
+    def seek(self, n, mode=0):
+        raise IOError('Bad file descriptor')
+
+    def readline(self):
+        raise IOError('Bad file descriptor')
+
+    def reset(self):
+        val = ''.join(self._buffer)
+        del self._buffer[:]
+        return val
+
+    def _write(self, x):
+        if isinstance(x, str):
+            x = x.decode('utf-8', 'replace')
+        self._buffer.append(x)
+
+    def write(self, x):
+        self._write(escape(x))
+
+    def writelines(self, x):
+        self._write(escape(''.join(x)))
+
+
+class ThreadedStream(object):
+    """Thread-local wrapper for sys.stdout for the interactive console."""
+
+    def push():
+        if not isinstance(sys.stdout, ThreadedStream):
+            sys.stdout = ThreadedStream()
+        _local.stream = HTMLStringO()
+    push = staticmethod(push)
+
+    def fetch():
+        try:
+            stream = _local.stream
+        except AttributeError:
+            return ''
+        return stream.reset()
+    fetch = staticmethod(fetch)
+
+    def displayhook(obj):
+        try:
+            stream = _local.stream
+        except AttributeError:
+            return _displayhook(obj)
+        # stream._write bypasses escaping as debug_repr is
+        # already generating HTML for us.
+        if obj is not None:
+            stream._write(debug_repr(obj))
+    displayhook = staticmethod(displayhook)
+
+    def __setattr__(self, name, value):
+        raise AttributeError('read only attribute %s' % name)
+
+    def __dir__(self):
+        return dir(sys.__stdout__)
+
+    def __getattribute__(self, name):
+        if name == '__members__':
+            return dir(sys.__stdout__)
+        try:
+            stream = _local.stream
+        except AttributeError:
+            stream = sys.__stdout__
+        return getattr(stream, name)
+
+    def __repr__(self):
+        return repr(sys.__stdout__)
+
+
+# add the threaded stream as display hook
+_displayhook = sys.displayhook
+sys.displayhook = ThreadedStream.displayhook
+
+
+class _ConsoleLoader(object):
+
+    def __init__(self):
+        self._storage = {}
+
+    def register(self, code, source):
+        self._storage[id(code)] = source
+        # register code objects of wrapped functions too.
+        for var in code.co_consts:
+            if isinstance(var, CodeType):
+                self._storage[id(var)] = source
+
+    def get_source_by_code(self, code):
+        try:
+            return self._storage[id(code)]
+        except KeyError:
+            pass
+
+
+def _wrap_compiler(console):
+    compile = console.compile
+    def func(source, filename, symbol):
+        code = compile(source, filename, symbol)
+        console.loader.register(code, source)
+        return code
+    console.compile = func
+
+
+class _InteractiveConsole(code.InteractiveInterpreter):
+
+    def __init__(self, globals, locals):
+        code.InteractiveInterpreter.__init__(self, locals)
+        self.globals = dict(globals)
+        self.globals['dump'] = dump
+        self.globals['help'] = helper
+        self.globals['__loader__'] = self.loader = _ConsoleLoader()
+        self.more = False
+        self.buffer = []
+        _wrap_compiler(self)
+
+    def runsource(self, source):
+        source = source.rstrip() + '\n'
+        ThreadedStream.push()
+        prompt = self.more and '... ' or '>>> '
+        try:
+            source_to_eval = ''.join(self.buffer + [source])
+            if code.InteractiveInterpreter.runsource(self,
+               source_to_eval, '<debugger>', 'single'):
+                self.more = True
+                self.buffer.append(source)
+            else:
+                self.more = False
+                del self.buffer[:]
+        finally:
+            output = ThreadedStream.fetch()
+        return prompt + source + output
+
+    def runcode(self, code):
+        try:
+            exec code in self.globals, self.locals
+        except:
+            self.showtraceback()
+
+    def showtraceback(self):
+        from werkzeug.debug.tbtools import get_current_traceback
+        tb = get_current_traceback(skip=1)
+        sys.stdout._write(tb.render_summary())
+
+    def showsyntaxerror(self, filename=None):
+        from werkzeug.debug.tbtools import get_current_traceback
+        tb = get_current_traceback(skip=4)
+        sys.stdout._write(tb.render_summary())
+
+    def write(self, data):
+        sys.stdout.write(data)
+
+
+class Console(object):
+    """An interactive console."""
+
+    def __init__(self, globals=None, locals=None):
+        if locals is None:
+            locals = {}
+        if globals is None:
+            globals = {}
+        self._ipy = _InteractiveConsole(globals, locals)
+
+    def eval(self, code):
+        return self._ipy.runsource(code)
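+
+# A small sketch of the standalone console: eval() returns an HTML transcript
+# containing the prompt, the echoed source and the debug_repr'd result:
+#
+#     console = Console({'answer': 42})
+#     html = console.eval('answer * 2')
+#     html.startswith('>>> answer * 2')   # True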
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/render.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.render
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Render the traceback debugging page.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import pprint
+from os.path import dirname, join
+
+from werkzeug.templates import Template
+
+
+def get_template(name):
+    return Template.from_file(join(dirname(__file__), 'shared', name),
+                              unicode_mode=False, errors='ignore')
+
+
+def load_resource(res):
+    try:
+        f = file(join(dirname(__file__), 'shared', res))
+    except IOError:
+        return ''
+    try:
+        return f.read()
+    finally:
+        f.close()
+
+
+t_body = get_template('body.tmpl')
+t_codetable = get_template('codetable.tmpl')
+t_vartable = get_template('vartable.tmpl')
+
+
+def code_table(frame):
+    from werkzeug.debug.util import Namespace
+    lines = []
+    lineno = frame['context_lineno']
+    if lineno is not None:
+        lineno += 1
+        for l in frame['pre_context']:
+            lines.append(Namespace(mode='pre', lineno=lineno, code=l))
+            lineno += 1
+        lines.append(Namespace(mode='cur', lineno=lineno,
+                               code=frame['context_line']))
+        lineno += 1
+        for l in frame['post_context']:
+            lines.append(Namespace(mode='post', lineno=lineno, code=l))
+            lineno += 1
+    else:
+        lines.append(Namespace(mode='cur', lineno=1,
+                               code='Sourcecode not available'))
+
+    return t_codetable.render(lines=lines)
+
+
+def var_table(var):
+    def safe_pformat(x):
+        try:
+            lines = pprint.pformat(x).splitlines()
+        except:
+            return '?'
+        tmp = []
+        for line in lines:
+            if len(line) > 79:
+                line = line[:79] + '...'
+            tmp.append(line)
+        return '\n'.join(tmp)
+
+    # dicts
+    if isinstance(var, dict) or hasattr(var, 'items'):
+        value = var.items()
+        if not value:
+            typ = 'empty'
+        else:
+            typ = 'dict'
+            value.sort()
+            value = [(repr(key), safe_pformat(val)) for key, val in value]
+
+    # lists
+    elif isinstance(var, list):
+        if not var:
+            typ = 'empty'
+        else:
+            typ = 'list'
+        value = [safe_pformat(item) for item in var]
+
+    # others
+    else:
+        typ = 'simple'
+        value = repr(var)
+
+    return t_vartable.render(type=typ, value=value)
+
+
+def debug_page(context):
+    tc = context.to_dict()
+    tc['var_table'] = var_table
+    tc['code_table'] = code_table
+    return t_body.render(tc)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/repr.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,238 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.repr
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module implements object representations for debugging purposes.
+    Unlike the default repr these reprs expose a lot more information and
+    produce HTML instead of ASCII.
+
+    Together with the CSS and JavaScript files of the debugger this gives
+    a colorful and more compact output.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD.
+"""
+import sys
+import re
+from traceback import format_exception_only
+try:
+    from collections import deque
+except ImportError: # pragma: no cover
+    deque = None
+from werkzeug.utils import escape
+from werkzeug.debug.utils import render_template
+
+
+missing = object()
+_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
+RegexType = type(_paragraph_re)
+
+
+def debug_repr(obj):
+    """Creates a debug repr of an object as HTML unicode string."""
+    return DebugReprGenerator().repr(obj)
+
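+# A tiny sketch of the output shape for a plain list:
+#
+#     debug_repr([1, 2])
+#     # u'[<span class="number">1</span>, <span class="number">2</span>]'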
+
+def dump(obj=missing):
+    """Print the object details to stdout._write (for the interactive
+    console of the web debugger.
+    """
+    gen = DebugReprGenerator()
+    if obj is missing:
+        rv = gen.dump_locals(sys._getframe(1).f_locals)
+    else:
+        rv = gen.dump_object(obj)
+    sys.stdout._write(rv)
+
+
+class _Helper(object):
+    """Displays an HTML version of the normal help, for the interactive
+    debugger only because it requires a patched sys.stdout.
+    """
+
+    def __call__(self, topic=None):
+        title = text = None
+        if topic is not None:
+            import pydoc
+            pydoc.help(topic)
+            rv = sys.stdout.reset().decode('utf-8', 'ignore')
+            paragraphs = _paragraph_re.split(rv)
+            if len(paragraphs) > 1:
+                title = paragraphs[0]
+                text = '\n\n'.join(paragraphs[1:])
+            else: # pragma: no cover
+                title = 'Help'
+                text = paragraphs[0]
+        rv = render_template('help_command.html', title=title, text=text)
+        sys.stdout._write(rv)
+
+helper = _Helper()
+
+
+def _add_subclass_info(inner, obj, base):
+    if isinstance(base, tuple):
+        for base in base:
+            if type(obj) is base:
+                return inner
+    elif type(obj) is base:
+        return inner
+    module = ''
+    if obj.__class__.__module__ not in ('__builtin__', 'exceptions'):
+        module = '<span class="module">%s.</span>' % obj.__class__.__module__
+    return '%s%s(%s)' % (module, obj.__class__.__name__, inner)
+
+
+class DebugReprGenerator(object):
+
+    def __init__(self):
+        self._stack = []
+
+    def _sequence_repr_maker(left, right, base=object(), limit=8):
+        def proxy(self, obj, recursive):
+            if recursive:
+                return _add_subclass_info(left + '...' + right, obj, base)
+            buf = [left]
+            have_extended_section = False
+            for idx, item in enumerate(obj):
+                if idx:
+                    buf.append(', ')
+                if idx == limit:
+                    buf.append('<span class="extended">')
+                    have_extended_section = True
+                buf.append(self.repr(item))
+            if have_extended_section:
+                buf.append('</span>')
+            buf.append(right)
+            return _add_subclass_info(u''.join(buf), obj, base)
+        return proxy
+
+    list_repr = _sequence_repr_maker('[', ']', list)
+    tuple_repr = _sequence_repr_maker('(', ')', tuple)
+    set_repr = _sequence_repr_maker('set([', '])', set)
+    frozenset_repr = _sequence_repr_maker('frozenset([', '])', frozenset)
+    if deque is not None:
+        deque_repr = _sequence_repr_maker('<span class="module">collections.'
+                                          '</span>deque([', '])', deque)
+    del _sequence_repr_maker
+
+    def regex_repr(self, obj):
+        pattern = repr(obj.pattern).decode('string-escape', 'ignore')
+        if pattern[:1] == 'u':
+            pattern = 'ur' + pattern[1:]
+        else:
+            pattern = 'r' + pattern
+        return u're.compile(<span class="string regex">%s</span>)' % pattern
+
+    def string_repr(self, obj, limit=70):
+        buf = ['<span class="string">']
+        escaped = escape(obj)
+        a = repr(escaped[:limit])
+        b = repr(escaped[limit:])
+        if isinstance(obj, unicode):
+            buf.append('u')
+            a = a[1:]
+            b = b[1:]
+        if b != "''":
+            buf.extend((a[:-1], '<span class="extended">', b[1:], '</span>'))
+        else:
+            buf.append(a)
+        buf.append('</span>')
+        return _add_subclass_info(u''.join(buf), obj, (str, unicode))
+
+    def dict_repr(self, d, recursive, limit=5):
+        if recursive:
+            return _add_subclass_info(u'{...}', d, dict)
+        buf = ['{']
+        have_extended_section = False
+        for idx, (key, value) in enumerate(d.iteritems()):
+            if idx:
+                buf.append(', ')
+            if idx == limit - 1:
+                buf.append('<span class="extended">')
+                have_extended_section = True
+            buf.append('<span class="pair"><span class="key">%s</span>: '
+                       '<span class="value">%s</span></span>' %
+                       (self.repr(key), self.repr(value)))
+        if have_extended_section:
+            buf.append('</span>')
+        buf.append('}')
+        return _add_subclass_info(u''.join(buf), d, dict)
+
+    def object_repr(self, obj):
+        return u'<span class="object">%s</span>' % \
+               escape(repr(obj).decode('utf-8', 'replace'))
+
+    def dispatch_repr(self, obj, recursive):
+        if obj is helper:
+            return helper.get_help(None)
+        if isinstance(obj, (int, long, float, complex)):
+            return u'<span class="number">%r</span>' % obj
+        if isinstance(obj, basestring):
+            return self.string_repr(obj)
+        if isinstance(obj, RegexType):
+            return self.regex_repr(obj)
+        if isinstance(obj, list):
+            return self.list_repr(obj, recursive)
+        if isinstance(obj, tuple):
+            return self.tuple_repr(obj, recursive)
+        if isinstance(obj, set):
+            return self.set_repr(obj, recursive)
+        if isinstance(obj, frozenset):
+            return self.frozenset_repr(obj, recursive)
+        if isinstance(obj, dict):
+            return self.dict_repr(obj, recursive)
+        if deque is not None and isinstance(obj, deque):
+            return self.deque_repr(obj, recursive)
+        return self.object_repr(obj)
+
+    def fallback_repr(self):
+        try:
+            info = ''.join(format_exception_only(*sys.exc_info()[:2]))
+        except: # pragma: no cover
+            info = '?'
+        return u'<span class="brokenrepr">&lt;broken repr (%s)&gt;' \
+               u'</span>' % escape(info.decode('utf-8', 'ignore').strip())
+
+    def repr(self, obj):
+        recursive = False
+        for item in self._stack:
+            if item is obj:
+                recursive = True
+                break
+        self._stack.append(obj)
+        try:
+            try:
+                return self.dispatch_repr(obj, recursive)
+            except:
+                return self.fallback_repr()
+        finally:
+            self._stack.pop()
+
+    def dump_object(self, obj):
+        repr = items = None
+        if isinstance(obj, dict):
+            title = 'Contents of'
+            items = []
+            for key, value in obj.iteritems():
+                if not isinstance(key, basestring):
+                    items = None
+                    break
+                items.append((key, self.repr(value)))
+        if items is None:
+            items = []
+            repr = self.repr(obj)
+            for key in dir(obj):
+                try:
+                    items.append((key, self.repr(getattr(obj, key))))
+                except:
+                    pass
+            title = 'Details for'
+        title += ' ' + object.__repr__(obj)[1:-1]
+        return render_template('dump_object.html', items=items,
+                               title=title, repr=repr)
+
+    def dump_locals(self, d):
+        items = [(key, self.repr(value)) for key, value in d.items()]
+        return render_template('dump_object.html', items=items,
+                               title='Local variables in frame', repr=None)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/shared/body.tmpl	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,81 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+  "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+  <head>
+    <title>$escape(exception_type) in $escape(last_frame['basename']) (Werkzeug Debugger)</title>
+    <link rel="stylesheet" href="__traceback__?resource=style.css&amp;mimetype=text/css" type="text/css">
+    <script type="text/javascript" src="__traceback__?resource=jquery.js&amp;mimetype=text/javascript"></script>
+    <script type="text/javascript" src="__traceback__?resource=debugger.js&amp;mimetype=text/javascript"></script>
+  </head>
+  <body>
+    <div class="traceback_wrapper">
+      <h1>$escape(exception_type)</h1>
+      <p class="errormsg">$escape(exception_value)</p>
+
+      <p class="errorline">
+        $escape(last_frame['filename']) in
+        $escape(last_frame['function']),
+        line $last_frame['lineno']
+      </p>
+
+      <h2 onclick="changeTB()" class="tb">Traceback <span>(toggle raw view)</span></h2>
+      <div id="interactive">
+        <p class="text">A problem occurred in your Python WSGI application.
+          Here is the sequence of function calls leading up to the error, in the order
+          they occurred. Activate a code line to toggle context lines.</p>
+
+      <% for num, frame in enumerate(frames) %>
+        <div class="frame" id="frame-$num">
+          <h3 class="fn"><em>$escape(frame['function'])</em> in <tt>$escape(frame['filename'])</tt></h3>
+          <a class="locals" href="javascript:toggleFrameVars($num)">[inspect]</a>
+          <% if evalex %><a class="eval" href="javascript:toggleInterpreter($num)">[console]</a><% endif %>
+          $code_table(frame)
+          $var_table(frame['vars'])
+          <% if evalex %>
+            <form class="exec_code" action="">
+              <pre class="output">[console ready]</pre>
+              <input type="hidden" name="tb" value="$tb_uid">
+              <input type="hidden" name="frame" value="$frame['frame_uid']">
+              <input type="text" name="cmd" class="input" value="">
+            </form>
+          <% endif %>
+        </div>
+      <% endfor %>
+      </div>
+
+      <div id="plain">
+        <p class="text">Here is the plain Python traceback for copy and paste:</p>
+        <pre class="plain">$escape(plaintb)</pre>
+        <p class="text pastebininfo">
+          <a href="javascript:pasteIt()">Create a new Paste</a> with
+          this traceback in the lodgeit pastebin.
+        </p>
+      </div>
+
+      <% if req_vars %>
+        <h2>Request Data</h2>
+        <p class="text">The following list contains all important request variables.
+          Select a header to expand the list.</p>
+        <% for num, (key, info) in enumerate(req_vars) %>
+          <dl>
+            <dt onclick="toggleTableVars($num)">$escape(key)</dt>
+            <dd id="tvar-$num">$var_table(info)</dd>
+          </dl>
+        <% endfor %>
+      <% endif %>
+    </div>
+
+    <div id="footer">
+      Brought to you by <span class="arthur">DON'T PANIC</span>, your friendly
+      Werkzeug powered traceback interpreter.
+    </div>
+  </body>
+</html>
+
+<!-- Plain traceback:
+
+<%py
+  import re
+  print re.sub('-{2,}', '-', plaintb)
+%>
+-->
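The $expr and <% ... %> constructs in body.tmpl above appear to be the same mini-template syntax implemented in werkzeug/templates.py (also bundled in this changeset). A small sketch of that syntax, assuming the 0.6-era werkzeug.templates.Template API and passing the escape callable in explicitly; Python 2 syntax:

# Sketch only -- assumes Template.render() accepts the context as keyword
# arguments and that $name(...) calls resolve against that context.
from werkzeug import escape
from werkzeug.templates import Template

tmpl = Template(u'''<ul>
<% for num, item in enumerate(seq) %>
  <li id="item-$num">$escape(item)</li>
<% endfor %>
</ul>''')

print tmpl.render(seq=[u'foo', u'<bar>'], escape=escape)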
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/shared/codetable.tmpl	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,8 @@
+<table class="code">
+<% for line in lines %>
+  <tr class="$line.mode">
+    <td class="lineno">$line.lineno</td>
+    <td class="code">$line.code</td>
+  </tr>
+<% endfor %>
+</table>
Binary file bundled/werkzeug/werkzeug/debug/shared/console.png has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/shared/debugger.js	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,196 @@
+$(function() {
+  var sourceView = null;
+
+  /**
+   * if we are in console mode, show the console.
+   */
+  if (CONSOLE_MODE && EVALEX) {
+    openShell(null, $('div.console div.inner').empty(), 0);
+  }
+
+  $('div.traceback div.frame').each(function() {
+    var
+      target = $('pre', this)
+        .click(function() {
+          sourceButton.click();
+        }),
+      consoleNode = null, source = null,
+      frameID = this.id.substring(6);
+
+    /**
+     * Add an interactive console to the frames
+     */
+    if (EVALEX)
+      $('<img src="./__debugger__?cmd=resource&f=console.png">')
+        .attr('title', 'Open an interactive python shell in this frame')
+        .click(function() {
+          consoleNode = openShell(consoleNode, target, frameID);
+          return false;
+        })
+        .prependTo(target);
+
+    /**
+     * Show sourcecode
+     */
+    var sourceButton = $('<img src="./__debugger__?cmd=resource&f=source.png">')
+      .attr('title', 'Display the sourcecode for this frame')
+      .click(function() {
+        if (!sourceView)
+          $('h2', sourceView =
+            $('<div class="box"><h2>View Source</h2><div class="sourceview">' +
+              '<table></table></div>')
+              .insertBefore('div.explanation'))
+            .css('cursor', 'pointer')
+            .click(function() {
+              sourceView.slideUp('fast');
+            });
+        $.get('./__debugger__', {cmd: 'source', frm: frameID}, function(data) {
+          $('table', sourceView)
+            .replaceWith(data);
+          if (!sourceView.is(':visible'))
+            sourceView.slideDown('fast', function() {
+              focusSourceBlock();
+            });
+          else
+            focusSourceBlock();
+        });
+        return false;
+      })
+      .prependTo(target);
+  });
+
+  /**
+   * toggle traceback types on click.
+   */
+  $('h2.traceback').click(function() {
+    $(this).next().slideToggle('fast');
+    $('div.plain').slideToggle('fast');
+  }).css('cursor', 'pointer');
+  $('div.plain').hide();
+
+  /**
+   * Add extra info (this is here so that only users with JavaScript
+   * enabled see it.)
+   */
+  $('span.nojavascript')
+    .removeClass('nojavascript')
+    .html('<p>To switch between the interactive traceback and the plaintext ' +
+          'one, you can click on the "Traceback" headline.  From the text ' +
+          'traceback you can also create a paste of it. ' + (!EVALEX ? '' :
+          'For code execution mouse-over the frame you want to debug and ' +
+          'click on the console icon on the right side.' +
+          '<p>You can execute arbitrary Python code in the stack frames and ' +
+          'there are some extra helpers available for introspection:' +
+          '<ul><li><code>dump()</code> shows all variables in the frame' +
+          '<li><code>dump(obj)</code> dumps all that\'s known about the object</ul>'));
+
+  /**
+   * Add the pastebin feature
+   */
+  $('div.plain form')
+    .submit(function() {
+      var label = $('input[type="submit"]', this);
+      var old_val = label.val();
+      label.val('submitting...');
+      $.ajax({
+        dataType:     'json',
+        url:          './__debugger__',
+        data:         {tb: TRACEBACK, cmd: 'paste'},
+        success:      function(data) {
+          $('div.plain span.pastemessage')
+            .removeClass('pastemessage')
+            .text('Paste created: ')
+            .append($('<a>#' + data.id + '</a>').attr('href', data.url));
+        },
+        error:        function() {
+          alert('Error: Could not submit paste.  No network connection?');
+          label.val(old_val);
+        }
+      });
+      return false;
+    });
+
+  // If we have JavaScript we submit via Ajax anyway, so there is no need
+  // for the non-scaling textarea.
+  var plainTraceback = $('div.plain textarea');
+  plainTraceback.replaceWith($('<pre>').text(plainTraceback.text()));
+});
+
+
+/**
+ * Helper function for shell initialization
+ */
+function openShell(consoleNode, target, frameID) {
+  if (consoleNode)
+    return consoleNode.slideToggle('fast');
+  consoleNode = $('<pre class="console">')
+    .appendTo(target.parent())
+    .hide();
+  var historyPos = 0, history = [''];
+  var output = $('<div class="output">[console ready]</div>')
+    .appendTo(consoleNode);
+  var form = $('<form>&gt;&gt;&gt; </form>')
+    .submit(function() {
+      var cmd = command.val();
+      $.get('./__debugger__', {cmd: cmd, frm: frameID}, function(data) {
+        var tmp = $('<div>').html(data);
+        $('span.extended', tmp).each(function() {
+          var hidden = $(this).wrap('<span>').hide();
+          hidden
+            .parent()
+            .append($('<a href="#" class="toggle">&nbsp;&nbsp;</a>')
+              .click(function() {
+                hidden.toggle();
+                $(this).toggleClass('open')
+                return false;
+              }));
+        });
+        output.append(tmp);
+        command.focus();
+        var old = history.pop();
+        history.push(cmd);
+        if (typeof old != 'undefined')
+          history.push(old);
+        historyPos = history.length - 1;
+      });
+      command.val('');
+      return false;
+    })
+    .appendTo(consoleNode);
+
+  var command = $('<input type="text">')
+    .appendTo(form)
+    .keydown(function(e) {
+      if (e.charCode == 100 && e.ctrlKey) {
+        output.text('--- screen cleared ---');
+        return false;
+      }
+      else if (e.charCode == 0 && (e.keyCode == 38 || e.keyCode == 40)) {
+        if (e.keyCode == 38 && historyPos > 0)
+          historyPos--;
+        else if (e.keyCode == 40 && historyPos < history.length)
+          historyPos++;
+        command.val(history[historyPos]);
+        return false;
+      }
+    });
+    
+  return consoleNode.slideDown('fast', function() {
+    command.focus();
+  });
+}
+
+/**
+ * Focus the current block in the source view.
+ */
+function focusSourceBlock() {
+  var tmp, line = $('table.source tr.current');
+  for (var i = 0; i < 7; i++) {
+    tmp = line.prev();
+    if (!(tmp && tmp.is('.in-frame')))
+      break
+    line = tmp;
+  }
+  var container = $('div.sourceview')[0];
+  container.scrollTop = line.offset().top - container.offsetTop;
+}
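debugger.js above talks to the server through the ./__debugger__ endpoint (cmd=resource|source|paste plus the per-frame console commands) and expects the rendered page to define CONSOLE_MODE, EVALEX and TRACEBACK. A minimal sketch of mounting the middleware that serves these requests, assuming the 0.6-era werkzeug.debug and werkzeug.serving APIs:

# Sketch only -- wraps a deliberately failing WSGI app in the debugger
# middleware; evalex=True enables the in-frame consoles gated by EVALEX.
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_simple

def application(environ, start_response):
    raise RuntimeError('intentional error to trigger the traceback page')

app = DebuggedApplication(application, evalex=True)

if __name__ == '__main__':
    run_simple('localhost', 5000, app)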
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/shared/jquery.js	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,19 @@
+/*
+ * jQuery JavaScript Library v1.3.2
+ * http://jquery.com/
+ *
+ * Copyright (c) 2009 John Resig
+ * Dual licensed under the MIT and GPL licenses.
+ * http://docs.jquery.com/License
+ *
+ * Date: 2009-02-19 17:34:21 -0500 (Thu, 19 Feb 2009)
+ * Revision: 6246
+ */
+(function(){var l=this,g,y=l.jQuery,p=l.$,o=l.jQuery=l.$=function(E,F){return new o.fn.init(E,F)},D=/^[^<]*(<(.|\s)+>)[^>]*$|^#([\w-]+)$/,f=/^.[^:#\[\.,]*$/;o.fn=o.prototype={init:function(E,H){E=E||document;if(E.nodeType){this[0]=E;this.length=1;this.context=E;return this}if(typeof E==="string"){var G=D.exec(E);if(G&&(G[1]||!H)){if(G[1]){E=o.clean([G[1]],H)}else{var I=document.getElementById(G[3]);if(I&&I.id!=G[3]){return o().find(E)}var F=o(I||[]);F.context=document;F.selector=E;return F}}else{return o(H).find(E)}}else{if(o.isFunction(E)){return o(document).ready(E)}}if(E.selector&&E.context){this.selector=E.selector;this.context=E.context}return this.setArray(o.isArray(E)?E:o.makeArray(E))},selector:"",jquery:"1.3.2",size:function(){return this.length},get:function(E){return E===g?Array.prototype.slice.call(this):this[E]},pushStack:function(F,H,E){var G=o(F);G.prevObject=this;G.context=this.context;if(H==="find"){G.selector=this.selector+(this.selector?" ":"")+E}else{if(H){G.selector=this.selector+"."+H+"("+E+")"}}return G},setArray:function(E){this.length=0;Array.prototype.push.apply(this,E);return this},each:function(F,E){return o.each(this,F,E)},index:function(E){return o.inArray(E&&E.jquery?E[0]:E,this)},attr:function(F,H,G){var E=F;if(typeof F==="string"){if(H===g){return this[0]&&o[G||"attr"](this[0],F)}else{E={};E[F]=H}}return this.each(function(I){for(F in E){o.attr(G?this.style:this,F,o.prop(this,E[F],G,I,F))}})},css:function(E,F){if((E=="width"||E=="height")&&parseFloat(F)<0){F=g}return this.attr(E,F,"curCSS")},text:function(F){if(typeof F!=="object"&&F!=null){return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(F))}var E="";o.each(F||this,function(){o.each(this.childNodes,function(){if(this.nodeType!=8){E+=this.nodeType!=1?this.nodeValue:o.fn.text([this])}})});return E},wrapAll:function(E){if(this[0]){var F=o(E,this[0].ownerDocument).clone();if(this[0].parentNode){F.insertBefore(this[0])}F.map(function(){var G=this;while(G.firstChild){G=G.firstChild}return G}).append(this)}return this},wrapInner:function(E){return this.each(function(){o(this).contents().wrapAll(E)})},wrap:function(E){return this.each(function(){o(this).wrapAll(E)})},append:function(){return this.domManip(arguments,true,function(E){if(this.nodeType==1){this.appendChild(E)}})},prepend:function(){return this.domManip(arguments,true,function(E){if(this.nodeType==1){this.insertBefore(E,this.firstChild)}})},before:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this)})},after:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this.nextSibling)})},end:function(){return this.prevObject||o([])},push:[].push,sort:[].sort,splice:[].splice,find:function(E){if(this.length===1){var F=this.pushStack([],"find",E);F.length=0;o.find(E,this[0],F);return F}else{return this.pushStack(o.unique(o.map(this,function(G){return o.find(E,G)})),"find",E)}},clone:function(G){var E=this.map(function(){if(!o.support.noCloneEvent&&!o.isXMLDoc(this)){var I=this.outerHTML;if(!I){var J=this.ownerDocument.createElement("div");J.appendChild(this.cloneNode(true));I=J.innerHTML}return o.clean([I.replace(/ jQuery\d+="(?:\d+|null)"/g,"").replace(/^\s*/,"")])[0]}else{return this.cloneNode(true)}});if(G===true){var H=this.find("*").andSelf(),F=0;E.find("*").andSelf().each(function(){if(this.nodeName!==H[F].nodeName){return}var I=o.data(H[F],"events");for(var K in I){for(var J in I[K]){o.event.add(this,K,I[K][J],I[K][J].data)}}F++})}return 
E},filter:function(E){return this.pushStack(o.isFunction(E)&&o.grep(this,function(G,F){return E.call(G,F)})||o.multiFilter(E,o.grep(this,function(F){return F.nodeType===1})),"filter",E)},closest:function(E){var G=o.expr.match.POS.test(E)?o(E):null,F=0;return this.map(function(){var H=this;while(H&&H.ownerDocument){if(G?G.index(H)>-1:o(H).is(E)){o.data(H,"closest",F);return H}H=H.parentNode;F++}})},not:function(E){if(typeof E==="string"){if(f.test(E)){return this.pushStack(o.multiFilter(E,this,true),"not",E)}else{E=o.multiFilter(E,this)}}var F=E.length&&E[E.length-1]!==g&&!E.nodeType;return this.filter(function(){return F?o.inArray(this,E)<0:this!=E})},add:function(E){return this.pushStack(o.unique(o.merge(this.get(),typeof E==="string"?o(E):o.makeArray(E))))},is:function(E){return !!E&&o.multiFilter(E,this).length>0},hasClass:function(E){return !!E&&this.is("."+E)},val:function(K){if(K===g){var E=this[0];if(E){if(o.nodeName(E,"option")){return(E.attributes.value||{}).specified?E.value:E.text}if(o.nodeName(E,"select")){var I=E.selectedIndex,L=[],M=E.options,H=E.type=="select-one";if(I<0){return null}for(var F=H?I:0,J=H?I+1:M.length;F<J;F++){var G=M[F];if(G.selected){K=o(G).val();if(H){return K}L.push(K)}}return L}return(E.value||"").replace(/\r/g,"")}return g}if(typeof K==="number"){K+=""}return this.each(function(){if(this.nodeType!=1){return}if(o.isArray(K)&&/radio|checkbox/.test(this.type)){this.checked=(o.inArray(this.value,K)>=0||o.inArray(this.name,K)>=0)}else{if(o.nodeName(this,"select")){var N=o.makeArray(K);o("option",this).each(function(){this.selected=(o.inArray(this.value,N)>=0||o.inArray(this.text,N)>=0)});if(!N.length){this.selectedIndex=-1}}else{this.value=K}}})},html:function(E){return E===g?(this[0]?this[0].innerHTML.replace(/ jQuery\d+="(?:\d+|null)"/g,""):null):this.empty().append(E)},replaceWith:function(E){return this.after(E).remove()},eq:function(E){return this.slice(E,+E+1)},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments),"slice",Array.prototype.slice.call(arguments).join(","))},map:function(E){return this.pushStack(o.map(this,function(G,F){return E.call(G,F,G)}))},andSelf:function(){return this.add(this.prevObject)},domManip:function(J,M,L){if(this[0]){var I=(this[0].ownerDocument||this[0]).createDocumentFragment(),F=o.clean(J,(this[0].ownerDocument||this[0]),I),H=I.firstChild;if(H){for(var G=0,E=this.length;G<E;G++){L.call(K(this[G],H),this.length>1||G>0?I.cloneNode(true):I)}}if(F){o.each(F,z)}}return this;function K(N,O){return M&&o.nodeName(N,"table")&&o.nodeName(O,"tr")?(N.getElementsByTagName("tbody")[0]||N.appendChild(N.ownerDocument.createElement("tbody"))):N}}};o.fn.init.prototype=o.fn;function z(E,F){if(F.src){o.ajax({url:F.src,async:false,dataType:"script"})}else{o.globalEval(F.text||F.textContent||F.innerHTML||"")}if(F.parentNode){F.parentNode.removeChild(F)}}function e(){return +new Date}o.extend=o.fn.extend=function(){var J=arguments[0]||{},H=1,I=arguments.length,E=false,G;if(typeof J==="boolean"){E=J;J=arguments[1]||{};H=2}if(typeof J!=="object"&&!o.isFunction(J)){J={}}if(I==H){J=this;--H}for(;H<I;H++){if((G=arguments[H])!=null){for(var F in G){var K=J[F],L=G[F];if(J===L){continue}if(E&&L&&typeof L==="object"&&!L.nodeType){J[F]=o.extend(E,K||(L.length!=null?[]:{}),L)}else{if(L!==g){J[F]=L}}}}}return J};var b=/z-?index|font-?weight|opacity|zoom|line-?height/i,q=document.defaultView||{},s=Object.prototype.toString;o.extend({noConflict:function(E){l.$=p;if(E){l.jQuery=y}return o},isFunction:function(E){return 
s.call(E)==="[object Function]"},isArray:function(E){return s.call(E)==="[object Array]"},isXMLDoc:function(E){return E.nodeType===9&&E.documentElement.nodeName!=="HTML"||!!E.ownerDocument&&o.isXMLDoc(E.ownerDocument)},globalEval:function(G){if(G&&/\S/.test(G)){var F=document.getElementsByTagName("head")[0]||document.documentElement,E=document.createElement("script");E.type="text/javascript";if(o.support.scriptEval){E.appendChild(document.createTextNode(G))}else{E.text=G}F.insertBefore(E,F.firstChild);F.removeChild(E)}},nodeName:function(F,E){return F.nodeName&&F.nodeName.toUpperCase()==E.toUpperCase()},each:function(G,K,F){var E,H=0,I=G.length;if(F){if(I===g){for(E in G){if(K.apply(G[E],F)===false){break}}}else{for(;H<I;){if(K.apply(G[H++],F)===false){break}}}}else{if(I===g){for(E in G){if(K.call(G[E],E,G[E])===false){break}}}else{for(var J=G[0];H<I&&K.call(J,H,J)!==false;J=G[++H]){}}}return G},prop:function(H,I,G,F,E){if(o.isFunction(I)){I=I.call(H,F)}return typeof I==="number"&&G=="curCSS"&&!b.test(E)?I+"px":I},className:{add:function(E,F){o.each((F||"").split(/\s+/),function(G,H){if(E.nodeType==1&&!o.className.has(E.className,H)){E.className+=(E.className?" ":"")+H}})},remove:function(E,F){if(E.nodeType==1){E.className=F!==g?o.grep(E.className.split(/\s+/),function(G){return !o.className.has(F,G)}).join(" "):""}},has:function(F,E){return F&&o.inArray(E,(F.className||F).toString().split(/\s+/))>-1}},swap:function(H,G,I){var E={};for(var F in G){E[F]=H.style[F];H.style[F]=G[F]}I.call(H);for(var F in G){H.style[F]=E[F]}},css:function(H,F,J,E){if(F=="width"||F=="height"){var L,G={position:"absolute",visibility:"hidden",display:"block"},K=F=="width"?["Left","Right"]:["Top","Bottom"];function I(){L=F=="width"?H.offsetWidth:H.offsetHeight;if(E==="border"){return}o.each(K,function(){if(!E){L-=parseFloat(o.curCSS(H,"padding"+this,true))||0}if(E==="margin"){L+=parseFloat(o.curCSS(H,"margin"+this,true))||0}else{L-=parseFloat(o.curCSS(H,"border"+this+"Width",true))||0}})}if(H.offsetWidth!==0){I()}else{o.swap(H,G,I)}return Math.max(0,Math.round(L))}return o.curCSS(H,F,J)},curCSS:function(I,F,G){var L,E=I.style;if(F=="opacity"&&!o.support.opacity){L=o.attr(E,"opacity");return L==""?"1":L}if(F.match(/float/i)){F=w}if(!G&&E&&E[F]){L=E[F]}else{if(q.getComputedStyle){if(F.match(/float/i)){F="float"}F=F.replace(/([A-Z])/g,"-$1").toLowerCase();var M=q.getComputedStyle(I,null);if(M){L=M.getPropertyValue(F)}if(F=="opacity"&&L==""){L="1"}}else{if(I.currentStyle){var J=F.replace(/\-(\w)/g,function(N,O){return O.toUpperCase()});L=I.currentStyle[F]||I.currentStyle[J];if(!/^\d+(px)?$/i.test(L)&&/^\d/.test(L)){var H=E.left,K=I.runtimeStyle.left;I.runtimeStyle.left=I.currentStyle.left;E.left=L||0;L=E.pixelLeft+"px";E.left=H;I.runtimeStyle.left=K}}}}return L},clean:function(F,K,I){K=K||document;if(typeof K.createElement==="undefined"){K=K.ownerDocument||K[0]&&K[0].ownerDocument||document}if(!I&&F.length===1&&typeof F[0]==="string"){var H=/^<(\w+)\s*\/?>$/.exec(F[0]);if(H){return[K.createElement(H[1])]}}var G=[],E=[],L=K.createElement("div");o.each(F,function(P,S){if(typeof S==="number"){S+=""}if(!S){return}if(typeof S==="string"){S=S.replace(/(<(\w+)[^>]*?)\/>/g,function(U,V,T){return T.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?U:V+"></"+T+">"});var O=S.replace(/^\s+/,"").substring(0,10).toLowerCase();var Q=!O.indexOf("<opt")&&[1,"<select 
multiple='multiple'>","</select>"]||!O.indexOf("<leg")&&[1,"<fieldset>","</fieldset>"]||O.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"<table>","</table>"]||!O.indexOf("<tr")&&[2,"<table><tbody>","</tbody></table>"]||(!O.indexOf("<td")||!O.indexOf("<th"))&&[3,"<table><tbody><tr>","</tr></tbody></table>"]||!O.indexOf("<col")&&[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"]||!o.support.htmlSerialize&&[1,"div<div>","</div>"]||[0,"",""];L.innerHTML=Q[1]+S+Q[2];while(Q[0]--){L=L.lastChild}if(!o.support.tbody){var R=/<tbody/i.test(S),N=!O.indexOf("<table")&&!R?L.firstChild&&L.firstChild.childNodes:Q[1]=="<table>"&&!R?L.childNodes:[];for(var M=N.length-1;M>=0;--M){if(o.nodeName(N[M],"tbody")&&!N[M].childNodes.length){N[M].parentNode.removeChild(N[M])}}}if(!o.support.leadingWhitespace&&/^\s/.test(S)){L.insertBefore(K.createTextNode(S.match(/^\s*/)[0]),L.firstChild)}S=o.makeArray(L.childNodes)}if(S.nodeType){G.push(S)}else{G=o.merge(G,S)}});if(I){for(var J=0;G[J];J++){if(o.nodeName(G[J],"script")&&(!G[J].type||G[J].type.toLowerCase()==="text/javascript")){E.push(G[J].parentNode?G[J].parentNode.removeChild(G[J]):G[J])}else{if(G[J].nodeType===1){G.splice.apply(G,[J+1,0].concat(o.makeArray(G[J].getElementsByTagName("script"))))}I.appendChild(G[J])}}return E}return G},attr:function(J,G,K){if(!J||J.nodeType==3||J.nodeType==8){return g}var H=!o.isXMLDoc(J),L=K!==g;G=H&&o.props[G]||G;if(J.tagName){var F=/href|src|style/.test(G);if(G=="selected"&&J.parentNode){J.parentNode.selectedIndex}if(G in J&&H&&!F){if(L){if(G=="type"&&o.nodeName(J,"input")&&J.parentNode){throw"type property can't be changed"}J[G]=K}if(o.nodeName(J,"form")&&J.getAttributeNode(G)){return J.getAttributeNode(G).nodeValue}if(G=="tabIndex"){var I=J.getAttributeNode("tabIndex");return I&&I.specified?I.value:J.nodeName.match(/(button|input|object|select|textarea)/i)?0:J.nodeName.match(/^(a|area)$/i)&&J.href?0:g}return J[G]}if(!o.support.style&&H&&G=="style"){return o.attr(J.style,"cssText",K)}if(L){J.setAttribute(G,""+K)}var E=!o.support.hrefNormalized&&H&&F?J.getAttribute(G,2):J.getAttribute(G);return E===null?g:E}if(!o.support.opacity&&G=="opacity"){if(L){J.zoom=1;J.filter=(J.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(K)+""=="NaN"?"":"alpha(opacity="+K*100+")")}return J.filter&&J.filter.indexOf("opacity=")>=0?(parseFloat(J.filter.match(/opacity=([^)]*)/)[1])/100)+"":""}G=G.replace(/-([a-z])/ig,function(M,N){return N.toUpperCase()});if(L){J[G]=K}return J[G]},trim:function(E){return(E||"").replace(/^\s+|\s+$/g,"")},makeArray:function(G){var E=[];if(G!=null){var F=G.length;if(F==null||typeof G==="string"||o.isFunction(G)||G.setInterval){E[0]=G}else{while(F){E[--F]=G[F]}}}return E},inArray:function(G,H){for(var E=0,F=H.length;E<F;E++){if(H[E]===G){return E}}return -1},merge:function(H,E){var F=0,G,I=H.length;if(!o.support.getAll){while((G=E[F++])!=null){if(G.nodeType!=8){H[I++]=G}}}else{while((G=E[F++])!=null){H[I++]=G}}return H},unique:function(K){var F=[],E={};try{for(var G=0,H=K.length;G<H;G++){var J=o.data(K[G]);if(!E[J]){E[J]=true;F.push(K[G])}}}catch(I){F=K}return F},grep:function(F,J,E){var G=[];for(var H=0,I=F.length;H<I;H++){if(!E!=!J(F[H],H)){G.push(F[H])}}return G},map:function(E,J){var F=[];for(var G=0,H=E.length;G<H;G++){var I=J(E[G],G);if(I!=null){F[F.length]=I}}return F.concat.apply([],F)}});var C=navigator.userAgent.toLowerCase();o.browser={version:(C.match(/.+(?:rv|it|ra|ie)[\/: 
]([\d.]+)/)||[0,"0"])[1],safari:/webkit/.test(C),opera:/opera/.test(C),msie:/msie/.test(C)&&!/opera/.test(C),mozilla:/mozilla/.test(C)&&!/(compatible|webkit)/.test(C)};o.each({parent:function(E){return E.parentNode},parents:function(E){return o.dir(E,"parentNode")},next:function(E){return o.nth(E,2,"nextSibling")},prev:function(E){return o.nth(E,2,"previousSibling")},nextAll:function(E){return o.dir(E,"nextSibling")},prevAll:function(E){return o.dir(E,"previousSibling")},siblings:function(E){return o.sibling(E.parentNode.firstChild,E)},children:function(E){return o.sibling(E.firstChild)},contents:function(E){return o.nodeName(E,"iframe")?E.contentDocument||E.contentWindow.document:o.makeArray(E.childNodes)}},function(E,F){o.fn[E]=function(G){var H=o.map(this,F);if(G&&typeof G=="string"){H=o.multiFilter(G,H)}return this.pushStack(o.unique(H),E,G)}});o.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(E,F){o.fn[E]=function(G){var J=[],L=o(G);for(var K=0,H=L.length;K<H;K++){var I=(K>0?this.clone(true):this).get();o.fn[F].apply(o(L[K]),I);J=J.concat(I)}return this.pushStack(J,E,G)}});o.each({removeAttr:function(E){o.attr(this,E,"");if(this.nodeType==1){this.removeAttribute(E)}},addClass:function(E){o.className.add(this,E)},removeClass:function(E){o.className.remove(this,E)},toggleClass:function(F,E){if(typeof E!=="boolean"){E=!o.className.has(this,F)}o.className[E?"add":"remove"](this,F)},remove:function(E){if(!E||o.filter(E,[this]).length){o("*",this).add([this]).each(function(){o.event.remove(this);o.removeData(this)});if(this.parentNode){this.parentNode.removeChild(this)}}},empty:function(){o(this).children().remove();while(this.firstChild){this.removeChild(this.firstChild)}}},function(E,F){o.fn[E]=function(){return this.each(F,arguments)}});function j(E,F){return E[0]&&parseInt(o.curCSS(E[0],F,true),10)||0}var h="jQuery"+e(),v=0,A={};o.extend({cache:{},data:function(F,E,G){F=F==l?A:F;var H=F[h];if(!H){H=F[h]=++v}if(E&&!o.cache[H]){o.cache[H]={}}if(G!==g){o.cache[H][E]=G}return E?o.cache[H][E]:H},removeData:function(F,E){F=F==l?A:F;var H=F[h];if(E){if(o.cache[H]){delete o.cache[H][E];E="";for(E in o.cache[H]){break}if(!E){o.removeData(F)}}}else{try{delete F[h]}catch(G){if(F.removeAttribute){F.removeAttribute(h)}}delete o.cache[H]}},queue:function(F,E,H){if(F){E=(E||"fx")+"queue";var G=o.data(F,E);if(!G||o.isArray(H)){G=o.data(F,E,o.makeArray(H))}else{if(H){G.push(H)}}}return G},dequeue:function(H,G){var E=o.queue(H,G),F=E.shift();if(!G||G==="fx"){F=E[0]}if(F!==g){F.call(H)}}});o.fn.extend({data:function(E,G){var H=E.split(".");H[1]=H[1]?"."+H[1]:"";if(G===g){var F=this.triggerHandler("getData"+H[1]+"!",[H[0]]);if(F===g&&this.length){F=o.data(this[0],E)}return F===g&&H[1]?this.data(H[0]):F}else{return this.trigger("setData"+H[1]+"!",[H[0],G]).each(function(){o.data(this,E,G)})}},removeData:function(E){return this.each(function(){o.removeData(this,E)})},queue:function(E,F){if(typeof E!=="string"){F=E;E="fx"}if(F===g){return o.queue(this[0],E)}return this.each(function(){var G=o.queue(this,E,F);if(E=="fx"&&G.length==1){G[0].call(this)}})},dequeue:function(E){return this.each(function(){o.dequeue(this,E)})}});
+/*
+ * Sizzle CSS Selector Engine - v0.9.3
+ *  Copyright 2009, The Dojo Foundation
+ *  Released under the MIT, BSD, and GPL Licenses.
+ *  More information: http://sizzlejs.com/
+ */
+(function(){var R=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?/g,L=0,H=Object.prototype.toString;var F=function(Y,U,ab,ac){ab=ab||[];U=U||document;if(U.nodeType!==1&&U.nodeType!==9){return[]}if(!Y||typeof Y!=="string"){return ab}var Z=[],W,af,ai,T,ad,V,X=true;R.lastIndex=0;while((W=R.exec(Y))!==null){Z.push(W[1]);if(W[2]){V=RegExp.rightContext;break}}if(Z.length>1&&M.exec(Y)){if(Z.length===2&&I.relative[Z[0]]){af=J(Z[0]+Z[1],U)}else{af=I.relative[Z[0]]?[U]:F(Z.shift(),U);while(Z.length){Y=Z.shift();if(I.relative[Y]){Y+=Z.shift()}af=J(Y,af)}}}else{var ae=ac?{expr:Z.pop(),set:E(ac)}:F.find(Z.pop(),Z.length===1&&U.parentNode?U.parentNode:U,Q(U));af=F.filter(ae.expr,ae.set);if(Z.length>0){ai=E(af)}else{X=false}while(Z.length){var ah=Z.pop(),ag=ah;if(!I.relative[ah]){ah=""}else{ag=Z.pop()}if(ag==null){ag=U}I.relative[ah](ai,ag,Q(U))}}if(!ai){ai=af}if(!ai){throw"Syntax error, unrecognized expression: "+(ah||Y)}if(H.call(ai)==="[object Array]"){if(!X){ab.push.apply(ab,ai)}else{if(U.nodeType===1){for(var aa=0;ai[aa]!=null;aa++){if(ai[aa]&&(ai[aa]===true||ai[aa].nodeType===1&&K(U,ai[aa]))){ab.push(af[aa])}}}else{for(var aa=0;ai[aa]!=null;aa++){if(ai[aa]&&ai[aa].nodeType===1){ab.push(af[aa])}}}}}else{E(ai,ab)}if(V){F(V,U,ab,ac);if(G){hasDuplicate=false;ab.sort(G);if(hasDuplicate){for(var aa=1;aa<ab.length;aa++){if(ab[aa]===ab[aa-1]){ab.splice(aa--,1)}}}}}return ab};F.matches=function(T,U){return F(T,null,null,U)};F.find=function(aa,T,ab){var Z,X;if(!aa){return[]}for(var W=0,V=I.order.length;W<V;W++){var Y=I.order[W],X;if((X=I.match[Y].exec(aa))){var U=RegExp.leftContext;if(U.substr(U.length-1)!=="\\"){X[1]=(X[1]||"").replace(/\\/g,"");Z=I.find[Y](X,T,ab);if(Z!=null){aa=aa.replace(I.match[Y],"");break}}}}if(!Z){Z=T.getElementsByTagName("*")}return{set:Z,expr:aa}};F.filter=function(ad,ac,ag,W){var V=ad,ai=[],aa=ac,Y,T,Z=ac&&ac[0]&&Q(ac[0]);while(ad&&ac.length){for(var ab in I.filter){if((Y=I.match[ab].exec(ad))!=null){var U=I.filter[ab],ah,af;T=false;if(aa==ai){ai=[]}if(I.preFilter[ab]){Y=I.preFilter[ab](Y,aa,ag,ai,W,Z);if(!Y){T=ah=true}else{if(Y===true){continue}}}if(Y){for(var X=0;(af=aa[X])!=null;X++){if(af){ah=U(af,Y,X,aa);var ae=W^!!ah;if(ag&&ah!=null){if(ae){T=true}else{aa[X]=false}}else{if(ae){ai.push(af);T=true}}}}}if(ah!==g){if(!ag){aa=ai}ad=ad.replace(I.match[ab],"");if(!T){return[]}break}}}if(ad==V){if(T==null){throw"Syntax error, unrecognized expression: "+ad}else{break}}V=ad}return aa};var I=F.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF_-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF_-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF_-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF_-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*_-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF_-]|\\.)+)(?:\((['"]*)((?:\([^\)]+\)|[^\2\(\)]*)+)\2\))?/},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(T){return T.getAttribute("href")}},relative:{"+":function(aa,T,Z){var X=typeof T==="string",ab=X&&!/\W/.test(T),Y=X&&!ab;if(ab&&!Z){T=T.toUpperCase()}for(var W=0,V=aa.length,U;W<V;W++){if((U=aa[W])){while((U=U.previousSibling)&&U.nodeType!==1){}aa[W]=Y||U&&U.nodeName===T?U||false:U===T}}if(Y){F.filter(T,aa,true)}},">":function(Z,U,aa){var X=typeof U==="string";if(X&&!/\W/.test(U)){U=aa?U:U.toUpperCase();for(var V=0,T=Z.length;V<T;V++){var 
Y=Z[V];if(Y){var W=Y.parentNode;Z[V]=W.nodeName===U?W:false}}}else{for(var V=0,T=Z.length;V<T;V++){var Y=Z[V];if(Y){Z[V]=X?Y.parentNode:Y.parentNode===U}}if(X){F.filter(U,Z,true)}}},"":function(W,U,Y){var V=L++,T=S;if(!U.match(/\W/)){var X=U=Y?U:U.toUpperCase();T=P}T("parentNode",U,V,W,X,Y)},"~":function(W,U,Y){var V=L++,T=S;if(typeof U==="string"&&!U.match(/\W/)){var X=U=Y?U:U.toUpperCase();T=P}T("previousSibling",U,V,W,X,Y)}},find:{ID:function(U,V,W){if(typeof V.getElementById!=="undefined"&&!W){var T=V.getElementById(U[1]);return T?[T]:[]}},NAME:function(V,Y,Z){if(typeof Y.getElementsByName!=="undefined"){var U=[],X=Y.getElementsByName(V[1]);for(var W=0,T=X.length;W<T;W++){if(X[W].getAttribute("name")===V[1]){U.push(X[W])}}return U.length===0?null:U}},TAG:function(T,U){return U.getElementsByTagName(T[1])}},preFilter:{CLASS:function(W,U,V,T,Z,aa){W=" "+W[1].replace(/\\/g,"")+" ";if(aa){return W}for(var X=0,Y;(Y=U[X])!=null;X++){if(Y){if(Z^(Y.className&&(" "+Y.className+" ").indexOf(W)>=0)){if(!V){T.push(Y)}}else{if(V){U[X]=false}}}}return false},ID:function(T){return T[1].replace(/\\/g,"")},TAG:function(U,T){for(var V=0;T[V]===false;V++){}return T[V]&&Q(T[V])?U[1]:U[1].toUpperCase()},CHILD:function(T){if(T[1]=="nth"){var U=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(T[2]=="even"&&"2n"||T[2]=="odd"&&"2n+1"||!/\D/.test(T[2])&&"0n+"+T[2]||T[2]);T[2]=(U[1]+(U[2]||1))-0;T[3]=U[3]-0}T[0]=L++;return T},ATTR:function(X,U,V,T,Y,Z){var W=X[1].replace(/\\/g,"");if(!Z&&I.attrMap[W]){X[1]=I.attrMap[W]}if(X[2]==="~="){X[4]=" "+X[4]+" "}return X},PSEUDO:function(X,U,V,T,Y){if(X[1]==="not"){if(X[3].match(R).length>1||/^\w/.test(X[3])){X[3]=F(X[3],null,null,U)}else{var W=F.filter(X[3],U,V,true^Y);if(!V){T.push.apply(T,W)}return false}}else{if(I.match.POS.test(X[0])||I.match.CHILD.test(X[0])){return true}}return X},POS:function(T){T.unshift(true);return T}},filters:{enabled:function(T){return T.disabled===false&&T.type!=="hidden"},disabled:function(T){return T.disabled===true},checked:function(T){return T.checked===true},selected:function(T){T.parentNode.selectedIndex;return T.selected===true},parent:function(T){return !!T.firstChild},empty:function(T){return !T.firstChild},has:function(V,U,T){return !!F(T[3],V).length},header:function(T){return/h\d/i.test(T.nodeName)},text:function(T){return"text"===T.type},radio:function(T){return"radio"===T.type},checkbox:function(T){return"checkbox"===T.type},file:function(T){return"file"===T.type},password:function(T){return"password"===T.type},submit:function(T){return"submit"===T.type},image:function(T){return"image"===T.type},reset:function(T){return"reset"===T.type},button:function(T){return"button"===T.type||T.nodeName.toUpperCase()==="BUTTON"},input:function(T){return/input|select|textarea|button/i.test(T.nodeName)}},setFilters:{first:function(U,T){return T===0},last:function(V,U,T,W){return U===W.length-1},even:function(U,T){return T%2===0},odd:function(U,T){return T%2===1},lt:function(V,U,T){return U<T[3]-0},gt:function(V,U,T){return U>T[3]-0},nth:function(V,U,T){return T[3]-0==U},eq:function(V,U,T){return T[3]-0==U}},filter:{PSEUDO:function(Z,V,W,aa){var U=V[1],X=I.filters[U];if(X){return X(Z,W,V,aa)}else{if(U==="contains"){return(Z.textContent||Z.innerText||"").indexOf(V[3])>=0}else{if(U==="not"){var Y=V[3];for(var W=0,T=Y.length;W<T;W++){if(Y[W]===Z){return false}}return true}}}},CHILD:function(T,W){var Z=W[1],U=T;switch(Z){case"only":case"first":while(U=U.previousSibling){if(U.nodeType===1){return false}}if(Z=="first"){return 
true}U=T;case"last":while(U=U.nextSibling){if(U.nodeType===1){return false}}return true;case"nth":var V=W[2],ac=W[3];if(V==1&&ac==0){return true}var Y=W[0],ab=T.parentNode;if(ab&&(ab.sizcache!==Y||!T.nodeIndex)){var X=0;for(U=ab.firstChild;U;U=U.nextSibling){if(U.nodeType===1){U.nodeIndex=++X}}ab.sizcache=Y}var aa=T.nodeIndex-ac;if(V==0){return aa==0}else{return(aa%V==0&&aa/V>=0)}}},ID:function(U,T){return U.nodeType===1&&U.getAttribute("id")===T},TAG:function(U,T){return(T==="*"&&U.nodeType===1)||U.nodeName===T},CLASS:function(U,T){return(" "+(U.className||U.getAttribute("class"))+" ").indexOf(T)>-1},ATTR:function(Y,W){var V=W[1],T=I.attrHandle[V]?I.attrHandle[V](Y):Y[V]!=null?Y[V]:Y.getAttribute(V),Z=T+"",X=W[2],U=W[4];return T==null?X==="!=":X==="="?Z===U:X==="*="?Z.indexOf(U)>=0:X==="~="?(" "+Z+" ").indexOf(U)>=0:!U?Z&&T!==false:X==="!="?Z!=U:X==="^="?Z.indexOf(U)===0:X==="$="?Z.substr(Z.length-U.length)===U:X==="|="?Z===U||Z.substr(0,U.length+1)===U+"-":false},POS:function(X,U,V,Y){var T=U[2],W=I.setFilters[T];if(W){return W(X,V,U,Y)}}}};var M=I.match.POS;for(var O in I.match){I.match[O]=RegExp(I.match[O].source+/(?![^\[]*\])(?![^\(]*\))/.source)}var E=function(U,T){U=Array.prototype.slice.call(U);if(T){T.push.apply(T,U);return T}return U};try{Array.prototype.slice.call(document.documentElement.childNodes)}catch(N){E=function(X,W){var U=W||[];if(H.call(X)==="[object Array]"){Array.prototype.push.apply(U,X)}else{if(typeof X.length==="number"){for(var V=0,T=X.length;V<T;V++){U.push(X[V])}}else{for(var V=0;X[V];V++){U.push(X[V])}}}return U}}var G;if(document.documentElement.compareDocumentPosition){G=function(U,T){var V=U.compareDocumentPosition(T)&4?-1:U===T?0:1;if(V===0){hasDuplicate=true}return V}}else{if("sourceIndex" in document.documentElement){G=function(U,T){var V=U.sourceIndex-T.sourceIndex;if(V===0){hasDuplicate=true}return V}}else{if(document.createRange){G=function(W,U){var V=W.ownerDocument.createRange(),T=U.ownerDocument.createRange();V.selectNode(W);V.collapse(true);T.selectNode(U);T.collapse(true);var X=V.compareBoundaryPoints(Range.START_TO_END,T);if(X===0){hasDuplicate=true}return X}}}}(function(){var U=document.createElement("form"),V="script"+(new Date).getTime();U.innerHTML="<input name='"+V+"'/>";var T=document.documentElement;T.insertBefore(U,T.firstChild);if(!!document.getElementById(V)){I.find.ID=function(X,Y,Z){if(typeof Y.getElementById!=="undefined"&&!Z){var W=Y.getElementById(X[1]);return W?W.id===X[1]||typeof W.getAttributeNode!=="undefined"&&W.getAttributeNode("id").nodeValue===X[1]?[W]:g:[]}};I.filter.ID=function(Y,W){var X=typeof Y.getAttributeNode!=="undefined"&&Y.getAttributeNode("id");return Y.nodeType===1&&X&&X.nodeValue===W}}T.removeChild(U)})();(function(){var T=document.createElement("div");T.appendChild(document.createComment(""));if(T.getElementsByTagName("*").length>0){I.find.TAG=function(U,Y){var X=Y.getElementsByTagName(U[1]);if(U[1]==="*"){var W=[];for(var V=0;X[V];V++){if(X[V].nodeType===1){W.push(X[V])}}X=W}return X}}T.innerHTML="<a href='#'></a>";if(T.firstChild&&typeof T.firstChild.getAttribute!=="undefined"&&T.firstChild.getAttribute("href")!=="#"){I.attrHandle.href=function(U){return U.getAttribute("href",2)}}})();if(document.querySelectorAll){(function(){var T=F,U=document.createElement("div");U.innerHTML="<p class='TEST'></p>";if(U.querySelectorAll&&U.querySelectorAll(".TEST").length===0){return}F=function(Y,X,V,W){X=X||document;if(!W&&X.nodeType===9&&!Q(X)){try{return E(X.querySelectorAll(Y),V)}catch(Z){}}return 
T(Y,X,V,W)};F.find=T.find;F.filter=T.filter;F.selectors=T.selectors;F.matches=T.matches})()}if(document.getElementsByClassName&&document.documentElement.getElementsByClassName){(function(){var T=document.createElement("div");T.innerHTML="<div class='test e'></div><div class='test'></div>";if(T.getElementsByClassName("e").length===0){return}T.lastChild.className="e";if(T.getElementsByClassName("e").length===1){return}I.order.splice(1,0,"CLASS");I.find.CLASS=function(U,V,W){if(typeof V.getElementsByClassName!=="undefined"&&!W){return V.getElementsByClassName(U[1])}}})()}function P(U,Z,Y,ad,aa,ac){var ab=U=="previousSibling"&&!ac;for(var W=0,V=ad.length;W<V;W++){var T=ad[W];if(T){if(ab&&T.nodeType===1){T.sizcache=Y;T.sizset=W}T=T[U];var X=false;while(T){if(T.sizcache===Y){X=ad[T.sizset];break}if(T.nodeType===1&&!ac){T.sizcache=Y;T.sizset=W}if(T.nodeName===Z){X=T;break}T=T[U]}ad[W]=X}}}function S(U,Z,Y,ad,aa,ac){var ab=U=="previousSibling"&&!ac;for(var W=0,V=ad.length;W<V;W++){var T=ad[W];if(T){if(ab&&T.nodeType===1){T.sizcache=Y;T.sizset=W}T=T[U];var X=false;while(T){if(T.sizcache===Y){X=ad[T.sizset];break}if(T.nodeType===1){if(!ac){T.sizcache=Y;T.sizset=W}if(typeof Z!=="string"){if(T===Z){X=true;break}}else{if(F.filter(Z,[T]).length>0){X=T;break}}}T=T[U]}ad[W]=X}}}var K=document.compareDocumentPosition?function(U,T){return U.compareDocumentPosition(T)&16}:function(U,T){return U!==T&&(U.contains?U.contains(T):true)};var Q=function(T){return T.nodeType===9&&T.documentElement.nodeName!=="HTML"||!!T.ownerDocument&&Q(T.ownerDocument)};var J=function(T,aa){var W=[],X="",Y,V=aa.nodeType?[aa]:aa;while((Y=I.match.PSEUDO.exec(T))){X+=Y[0];T=T.replace(I.match.PSEUDO,"")}T=I.relative[T]?T+"*":T;for(var Z=0,U=V.length;Z<U;Z++){F(T,V[Z],W)}return F.filter(X,W)};o.find=F;o.filter=F.filter;o.expr=F.selectors;o.expr[":"]=o.expr.filters;F.selectors.filters.hidden=function(T){return T.offsetWidth===0||T.offsetHeight===0};F.selectors.filters.visible=function(T){return T.offsetWidth>0||T.offsetHeight>0};F.selectors.filters.animated=function(T){return o.grep(o.timers,function(U){return T===U.elem}).length};o.multiFilter=function(V,T,U){if(U){V=":not("+V+")"}return F.matches(V,T)};o.dir=function(V,U){var T=[],W=V[U];while(W&&W!=document){if(W.nodeType==1){T.push(W)}W=W[U]}return T};o.nth=function(X,T,V,W){T=T||1;var U=0;for(;X;X=X[V]){if(X.nodeType==1&&++U==T){break}}return X};o.sibling=function(V,U){var T=[];for(;V;V=V.nextSibling){if(V.nodeType==1&&V!=U){T.push(V)}}return T};return;l.Sizzle=F})();o.event={add:function(I,F,H,K){if(I.nodeType==3||I.nodeType==8){return}if(I.setInterval&&I!=l){I=l}if(!H.guid){H.guid=this.guid++}if(K!==g){var G=H;H=this.proxy(G);H.data=K}var E=o.data(I,"events")||o.data(I,"events",{}),J=o.data(I,"handle")||o.data(I,"handle",function(){return typeof o!=="undefined"&&!o.event.triggered?o.event.handle.apply(arguments.callee.elem,arguments):g});J.elem=I;o.each(F.split(/\s+/),function(M,N){var O=N.split(".");N=O.shift();H.type=O.slice().sort().join(".");var L=E[N];if(o.event.specialAll[N]){o.event.specialAll[N].setup.call(I,K,O)}if(!L){L=E[N]={};if(!o.event.special[N]||o.event.special[N].setup.call(I,K,O)===false){if(I.addEventListener){I.addEventListener(N,J,false)}else{if(I.attachEvent){I.attachEvent("on"+N,J)}}}}L[H.guid]=H;o.event.global[N]=true});I=null},guid:1,global:{},remove:function(K,H,J){if(K.nodeType==3||K.nodeType==8){return}var G=o.data(K,"events"),F,E;if(G){if(H===g||(typeof H==="string"&&H.charAt(0)==".")){for(var I in 
G){this.remove(K,I+(H||""))}}else{if(H.type){J=H.handler;H=H.type}o.each(H.split(/\s+/),function(M,O){var Q=O.split(".");O=Q.shift();var N=RegExp("(^|\\.)"+Q.slice().sort().join(".*\\.")+"(\\.|$)");if(G[O]){if(J){delete G[O][J.guid]}else{for(var P in G[O]){if(N.test(G[O][P].type)){delete G[O][P]}}}if(o.event.specialAll[O]){o.event.specialAll[O].teardown.call(K,Q)}for(F in G[O]){break}if(!F){if(!o.event.special[O]||o.event.special[O].teardown.call(K,Q)===false){if(K.removeEventListener){K.removeEventListener(O,o.data(K,"handle"),false)}else{if(K.detachEvent){K.detachEvent("on"+O,o.data(K,"handle"))}}}F=null;delete G[O]}}})}for(F in G){break}if(!F){var L=o.data(K,"handle");if(L){L.elem=null}o.removeData(K,"events");o.removeData(K,"handle")}}},trigger:function(I,K,H,E){var G=I.type||I;if(!E){I=typeof I==="object"?I[h]?I:o.extend(o.Event(G),I):o.Event(G);if(G.indexOf("!")>=0){I.type=G=G.slice(0,-1);I.exclusive=true}if(!H){I.stopPropagation();if(this.global[G]){o.each(o.cache,function(){if(this.events&&this.events[G]){o.event.trigger(I,K,this.handle.elem)}})}}if(!H||H.nodeType==3||H.nodeType==8){return g}I.result=g;I.target=H;K=o.makeArray(K);K.unshift(I)}I.currentTarget=H;var J=o.data(H,"handle");if(J){J.apply(H,K)}if((!H[G]||(o.nodeName(H,"a")&&G=="click"))&&H["on"+G]&&H["on"+G].apply(H,K)===false){I.result=false}if(!E&&H[G]&&!I.isDefaultPrevented()&&!(o.nodeName(H,"a")&&G=="click")){this.triggered=true;try{H[G]()}catch(L){}}this.triggered=false;if(!I.isPropagationStopped()){var F=H.parentNode||H.ownerDocument;if(F){o.event.trigger(I,K,F,true)}}},handle:function(K){var J,E;K=arguments[0]=o.event.fix(K||l.event);K.currentTarget=this;var L=K.type.split(".");K.type=L.shift();J=!L.length&&!K.exclusive;var I=RegExp("(^|\\.)"+L.slice().sort().join(".*\\.")+"(\\.|$)");E=(o.data(this,"events")||{})[K.type];for(var G in E){var H=E[G];if(J||I.test(H.type)){K.handler=H;K.data=H.data;var F=H.apply(this,arguments);if(F!==g){K.result=F;if(F===false){K.preventDefault();K.stopPropagation()}}if(K.isImmediatePropagationStopped()){break}}}},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),fix:function(H){if(H[h]){return H}var F=H;H=o.Event(F);for(var G=this.props.length,J;G;){J=this.props[--G];H[J]=F[J]}if(!H.target){H.target=H.srcElement||document}if(H.target.nodeType==3){H.target=H.target.parentNode}if(!H.relatedTarget&&H.fromElement){H.relatedTarget=H.fromElement==H.target?H.toElement:H.fromElement}if(H.pageX==null&&H.clientX!=null){var I=document.documentElement,E=document.body;H.pageX=H.clientX+(I&&I.scrollLeft||E&&E.scrollLeft||0)-(I.clientLeft||0);H.pageY=H.clientY+(I&&I.scrollTop||E&&E.scrollTop||0)-(I.clientTop||0)}if(!H.which&&((H.charCode||H.charCode===0)?H.charCode:H.keyCode)){H.which=H.charCode||H.keyCode}if(!H.metaKey&&H.ctrlKey){H.metaKey=H.ctrlKey}if(!H.which&&H.button){H.which=(H.button&1?1:(H.button&2?3:(H.button&4?2:0)))}return H},proxy:function(F,E){E=E||function(){return F.apply(this,arguments)};E.guid=F.guid=F.guid||E.guid||this.guid++;return E},special:{ready:{setup:B,teardown:function(){}}},specialAll:{live:{setup:function(E,F){o.event.add(this,F[0],c)},teardown:function(G){if(G.length){var 
E=0,F=RegExp("(^|\\.)"+G[0]+"(\\.|$)");o.each((o.data(this,"events").live||{}),function(){if(F.test(this.type)){E++}});if(E<1){o.event.remove(this,G[0],c)}}}}}};o.Event=function(E){if(!this.preventDefault){return new o.Event(E)}if(E&&E.type){this.originalEvent=E;this.type=E.type}else{this.type=E}this.timeStamp=e();this[h]=true};function k(){return false}function u(){return true}o.Event.prototype={preventDefault:function(){this.isDefaultPrevented=u;var E=this.originalEvent;if(!E){return}if(E.preventDefault){E.preventDefault()}E.returnValue=false},stopPropagation:function(){this.isPropagationStopped=u;var E=this.originalEvent;if(!E){return}if(E.stopPropagation){E.stopPropagation()}E.cancelBubble=true},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=u;this.stopPropagation()},isDefaultPrevented:k,isPropagationStopped:k,isImmediatePropagationStopped:k};var a=function(F){var E=F.relatedTarget;while(E&&E!=this){try{E=E.parentNode}catch(G){E=this}}if(E!=this){F.type=F.data;o.event.handle.apply(this,arguments)}};o.each({mouseover:"mouseenter",mouseout:"mouseleave"},function(F,E){o.event.special[E]={setup:function(){o.event.add(this,F,a,E)},teardown:function(){o.event.remove(this,F,a)}}});o.fn.extend({bind:function(F,G,E){return F=="unload"?this.one(F,G,E):this.each(function(){o.event.add(this,F,E||G,E&&G)})},one:function(G,H,F){var E=o.event.proxy(F||H,function(I){o(this).unbind(I,E);return(F||H).apply(this,arguments)});return this.each(function(){o.event.add(this,G,E,F&&H)})},unbind:function(F,E){return this.each(function(){o.event.remove(this,F,E)})},trigger:function(E,F){return this.each(function(){o.event.trigger(E,F,this)})},triggerHandler:function(E,G){if(this[0]){var F=o.Event(E);F.preventDefault();F.stopPropagation();o.event.trigger(F,G,this[0]);return F.result}},toggle:function(G){var E=arguments,F=1;while(F<E.length){o.event.proxy(G,E[F++])}return this.click(o.event.proxy(G,function(H){this.lastToggle=(this.lastToggle||0)%F;H.preventDefault();return E[this.lastToggle++].apply(this,arguments)||false}))},hover:function(E,F){return this.mouseenter(E).mouseleave(F)},ready:function(E){B();if(o.isReady){E.call(document,o)}else{o.readyList.push(E)}return this},live:function(G,F){var E=o.event.proxy(F);E.guid+=this.selector+G;o(document).bind(i(G,this.selector),this.selector,E);return this},die:function(F,E){o(document).unbind(i(F,this.selector),E?{guid:E.guid+this.selector+F}:null);return this}});function c(H){var E=RegExp("(^|\\.)"+H.type+"(\\.|$)"),G=true,F=[];o.each(o.data(this,"events").live||[],function(I,J){if(E.test(J.type)){var K=o(H.target).closest(J.data)[0];if(K){F.push({elem:K,fn:J})}}});F.sort(function(J,I){return o.data(J.elem,"closest")-o.data(I.elem,"closest")});o.each(F,function(){if(this.fn.call(this.elem,H,this.fn.data)===false){return(G=false)}});return G}function i(F,E){return["live",F,E.replace(/\./g,"`").replace(/ /g,"|")].join(".")}o.extend({isReady:false,readyList:[],ready:function(){if(!o.isReady){o.isReady=true;if(o.readyList){o.each(o.readyList,function(){this.call(document,o)});o.readyList=null}o(document).triggerHandler("ready")}}});var x=false;function 
B(){if(x){return}x=true;if(document.addEventListener){document.addEventListener("DOMContentLoaded",function(){document.removeEventListener("DOMContentLoaded",arguments.callee,false);o.ready()},false)}else{if(document.attachEvent){document.attachEvent("onreadystatechange",function(){if(document.readyState==="complete"){document.detachEvent("onreadystatechange",arguments.callee);o.ready()}});if(document.documentElement.doScroll&&l==l.top){(function(){if(o.isReady){return}try{document.documentElement.doScroll("left")}catch(E){setTimeout(arguments.callee,0);return}o.ready()})()}}}o.event.add(l,"load",o.ready)}o.each(("blur,focus,load,resize,scroll,unload,click,dblclick,mousedown,mouseup,mousemove,mouseover,mouseout,mouseenter,mouseleave,change,select,submit,keydown,keypress,keyup,error").split(","),function(F,E){o.fn[E]=function(G){return G?this.bind(E,G):this.trigger(E)}});o(l).bind("unload",function(){for(var E in o.cache){if(E!=1&&o.cache[E].handle){o.event.remove(o.cache[E].handle.elem)}}});(function(){o.support={};var F=document.documentElement,G=document.createElement("script"),K=document.createElement("div"),J="script"+(new Date).getTime();K.style.display="none";K.innerHTML='   <link/><table></table><a href="/a" style="color:red;float:left;opacity:.5;">a</a><select><option>text</option></select><object><param/></object>';var H=K.getElementsByTagName("*"),E=K.getElementsByTagName("a")[0];if(!H||!H.length||!E){return}o.support={leadingWhitespace:K.firstChild.nodeType==3,tbody:!K.getElementsByTagName("tbody").length,objectAll:!!K.getElementsByTagName("object")[0].getElementsByTagName("*").length,htmlSerialize:!!K.getElementsByTagName("link").length,style:/red/.test(E.getAttribute("style")),hrefNormalized:E.getAttribute("href")==="/a",opacity:E.style.opacity==="0.5",cssFloat:!!E.style.cssFloat,scriptEval:false,noCloneEvent:true,boxModel:null};G.type="text/javascript";try{G.appendChild(document.createTextNode("window."+J+"=1;"))}catch(I){}F.insertBefore(G,F.firstChild);if(l[J]){o.support.scriptEval=true;delete l[J]}F.removeChild(G);if(K.attachEvent&&K.fireEvent){K.attachEvent("onclick",function(){o.support.noCloneEvent=false;K.detachEvent("onclick",arguments.callee)});K.cloneNode(true).fireEvent("onclick")}o(function(){var L=document.createElement("div");L.style.width=L.style.paddingLeft="1px";document.body.appendChild(L);o.boxModel=o.support.boxModel=L.offsetWidth===2;document.body.removeChild(L).style.display="none"})})();var w=o.support.cssFloat?"cssFloat":"styleFloat";o.props={"for":"htmlFor","class":"className","float":w,cssFloat:w,styleFloat:w,readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",tabindex:"tabIndex"};o.fn.extend({_load:o.fn.load,load:function(G,J,K){if(typeof G!=="string"){return this._load(G)}var I=G.indexOf(" ");if(I>=0){var E=G.slice(I,G.length);G=G.slice(0,I)}var H="GET";if(J){if(o.isFunction(J)){K=J;J=null}else{if(typeof J==="object"){J=o.param(J);H="POST"}}}var F=this;o.ajax({url:G,type:H,dataType:"html",data:J,complete:function(M,L){if(L=="success"||L=="notmodified"){F.html(E?o("<div/>").append(M.responseText.replace(/<script(.|\s)*?\/script>/g,"")).find(E):M.responseText)}if(K){F.each(K,[M.responseText,L,M])}}});return this},serialize:function(){return o.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?o.makeArray(this.elements):this}).filter(function(){return 
this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password|search/i.test(this.type))}).map(function(E,F){var G=o(this).val();return G==null?null:o.isArray(G)?o.map(G,function(I,H){return{name:F.name,value:I}}):{name:F.name,value:G}}).get()}});o.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(E,F){o.fn[F]=function(G){return this.bind(F,G)}});var r=e();o.extend({get:function(E,G,H,F){if(o.isFunction(G)){H=G;G=null}return o.ajax({type:"GET",url:E,data:G,success:H,dataType:F})},getScript:function(E,F){return o.get(E,null,F,"script")},getJSON:function(E,F,G){return o.get(E,F,G,"json")},post:function(E,G,H,F){if(o.isFunction(G)){H=G;G={}}return o.ajax({type:"POST",url:E,data:G,success:H,dataType:F})},ajaxSetup:function(E){o.extend(o.ajaxSettings,E)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return l.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest()},accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(M){M=o.extend(true,M,o.extend(true,{},o.ajaxSettings,M));var W,F=/=\?(&|$)/g,R,V,G=M.type.toUpperCase();if(M.data&&M.processData&&typeof M.data!=="string"){M.data=o.param(M.data)}if(M.dataType=="jsonp"){if(G=="GET"){if(!M.url.match(F)){M.url+=(M.url.match(/\?/)?"&":"?")+(M.jsonp||"callback")+"=?"}}else{if(!M.data||!M.data.match(F)){M.data=(M.data?M.data+"&":"")+(M.jsonp||"callback")+"=?"}}M.dataType="json"}if(M.dataType=="json"&&(M.data&&M.data.match(F)||M.url.match(F))){W="jsonp"+r++;if(M.data){M.data=(M.data+"").replace(F,"="+W+"$1")}M.url=M.url.replace(F,"="+W+"$1");M.dataType="script";l[W]=function(X){V=X;I();L();l[W]=g;try{delete l[W]}catch(Y){}if(H){H.removeChild(T)}}}if(M.dataType=="script"&&M.cache==null){M.cache=false}if(M.cache===false&&G=="GET"){var E=e();var U=M.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+E+"$2");M.url=U+((U==M.url)?(M.url.match(/\?/)?"&":"?")+"_="+E:"")}if(M.data&&G=="GET"){M.url+=(M.url.match(/\?/)?"&":"?")+M.data;M.data=null}if(M.global&&!o.active++){o.event.trigger("ajaxStart")}var Q=/^(\w+:)?\/\/([^\/?#]+)/.exec(M.url);if(M.dataType=="script"&&G=="GET"&&Q&&(Q[1]&&Q[1]!=location.protocol||Q[2]!=location.host)){var H=document.getElementsByTagName("head")[0];var T=document.createElement("script");T.src=M.url;if(M.scriptCharset){T.charset=M.scriptCharset}if(!W){var O=false;T.onload=T.onreadystatechange=function(){if(!O&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){O=true;I();L();T.onload=T.onreadystatechange=null;H.removeChild(T)}}}H.appendChild(T);return g}var K=false;var J=M.xhr();if(M.username){J.open(G,M.url,M.async,M.username,M.password)}else{J.open(G,M.url,M.async)}try{if(M.data){J.setRequestHeader("Content-Type",M.contentType)}if(M.ifModified){J.setRequestHeader("If-Modified-Since",o.lastModified[M.url]||"Thu, 01 Jan 1970 00:00:00 GMT")}J.setRequestHeader("X-Requested-With","XMLHttpRequest");J.setRequestHeader("Accept",M.dataType&&M.accepts[M.dataType]?M.accepts[M.dataType]+", */*":M.accepts._default)}catch(S){}if(M.beforeSend&&M.beforeSend(J,M)===false){if(M.global&&!--o.active){o.event.trigger("ajaxStop")}J.abort();return false}if(M.global){o.event.trigger("ajaxSend",[J,M])}var 
N=function(X){if(J.readyState==0){if(P){clearInterval(P);P=null;if(M.global&&!--o.active){o.event.trigger("ajaxStop")}}}else{if(!K&&J&&(J.readyState==4||X=="timeout")){K=true;if(P){clearInterval(P);P=null}R=X=="timeout"?"timeout":!o.httpSuccess(J)?"error":M.ifModified&&o.httpNotModified(J,M.url)?"notmodified":"success";if(R=="success"){try{V=o.httpData(J,M.dataType,M)}catch(Z){R="parsererror"}}if(R=="success"){var Y;try{Y=J.getResponseHeader("Last-Modified")}catch(Z){}if(M.ifModified&&Y){o.lastModified[M.url]=Y}if(!W){I()}}else{o.handleError(M,J,R)}L();if(X){J.abort()}if(M.async){J=null}}}};if(M.async){var P=setInterval(N,13);if(M.timeout>0){setTimeout(function(){if(J&&!K){N("timeout")}},M.timeout)}}try{J.send(M.data)}catch(S){o.handleError(M,J,null,S)}if(!M.async){N()}function I(){if(M.success){M.success(V,R)}if(M.global){o.event.trigger("ajaxSuccess",[J,M])}}function L(){if(M.complete){M.complete(J,R)}if(M.global){o.event.trigger("ajaxComplete",[J,M])}if(M.global&&!--o.active){o.event.trigger("ajaxStop")}}return J},handleError:function(F,H,E,G){if(F.error){F.error(H,E,G)}if(F.global){o.event.trigger("ajaxError",[H,F,G])}},active:0,httpSuccess:function(F){try{return !F.status&&location.protocol=="file:"||(F.status>=200&&F.status<300)||F.status==304||F.status==1223}catch(E){}return false},httpNotModified:function(G,E){try{var H=G.getResponseHeader("Last-Modified");return G.status==304||H==o.lastModified[E]}catch(F){}return false},httpData:function(J,H,G){var F=J.getResponseHeader("content-type"),E=H=="xml"||!H&&F&&F.indexOf("xml")>=0,I=E?J.responseXML:J.responseText;if(E&&I.documentElement.tagName=="parsererror"){throw"parsererror"}if(G&&G.dataFilter){I=G.dataFilter(I,H)}if(typeof I==="string"){if(H=="script"){o.globalEval(I)}if(H=="json"){I=l["eval"]("("+I+")")}}return I},param:function(E){var G=[];function H(I,J){G[G.length]=encodeURIComponent(I)+"="+encodeURIComponent(J)}if(o.isArray(E)||E.jquery){o.each(E,function(){H(this.name,this.value)})}else{for(var F in E){if(o.isArray(E[F])){o.each(E[F],function(){H(F,this)})}else{H(F,o.isFunction(E[F])?E[F]():E[F])}}}return G.join("&").replace(/%20/g,"+")}});var m={},n,d=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];function t(F,E){var G={};o.each(d.concat.apply([],d.slice(0,E)),function(){G[this]=F});return G}o.fn.extend({show:function(J,L){if(J){return this.animate(t("show",3),J,L)}else{for(var H=0,F=this.length;H<F;H++){var E=o.data(this[H],"olddisplay");this[H].style.display=E||"";if(o.css(this[H],"display")==="none"){var G=this[H].tagName,K;if(m[G]){K=m[G]}else{var I=o("<"+G+" />").appendTo("body");K=I.css("display");if(K==="none"){K="block"}I.remove();m[G]=K}o.data(this[H],"olddisplay",K)}}for(var H=0,F=this.length;H<F;H++){this[H].style.display=o.data(this[H],"olddisplay")||""}return this}},hide:function(H,I){if(H){return this.animate(t("hide",3),H,I)}else{for(var G=0,F=this.length;G<F;G++){var E=o.data(this[G],"olddisplay");if(!E&&E!=="none"){o.data(this[G],"olddisplay",o.css(this[G],"display"))}}for(var G=0,F=this.length;G<F;G++){this[G].style.display="none"}return this}},_toggle:o.fn.toggle,toggle:function(G,F){var E=typeof G==="boolean";return o.isFunction(G)&&o.isFunction(F)?this._toggle.apply(this,arguments):G==null||E?this.each(function(){var H=E?G:o(this).is(":hidden");o(this)[H?"show":"hide"]()}):this.animate(t("toggle",3),G,F)},fadeTo:function(E,G,F){return this.animate({opacity:G},E,F)},animate:function(I,F,H,G){var 
E=o.speed(F,H,G);return this[E.queue===false?"each":"queue"](function(){var K=o.extend({},E),M,L=this.nodeType==1&&o(this).is(":hidden"),J=this;for(M in I){if(I[M]=="hide"&&L||I[M]=="show"&&!L){return K.complete.call(this)}if((M=="height"||M=="width")&&this.style){K.display=o.css(this,"display");K.overflow=this.style.overflow}}if(K.overflow!=null){this.style.overflow="hidden"}K.curAnim=o.extend({},I);o.each(I,function(O,S){var R=new o.fx(J,K,O);if(/toggle|show|hide/.test(S)){R[S=="toggle"?L?"show":"hide":S](I)}else{var Q=S.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),T=R.cur(true)||0;if(Q){var N=parseFloat(Q[2]),P=Q[3]||"px";if(P!="px"){J.style[O]=(N||1)+P;T=((N||1)/R.cur(true))*T;J.style[O]=T+P}if(Q[1]){N=((Q[1]=="-="?-1:1)*N)+T}R.custom(T,N,P)}else{R.custom(T,S,"")}}});return true})},stop:function(F,E){var G=o.timers;if(F){this.queue([])}this.each(function(){for(var H=G.length-1;H>=0;H--){if(G[H].elem==this){if(E){G[H](true)}G.splice(H,1)}}});if(!E){this.dequeue()}return this}});o.each({slideDown:t("show",1),slideUp:t("hide",1),slideToggle:t("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(E,F){o.fn[E]=function(G,H){return this.animate(F,G,H)}});o.extend({speed:function(G,H,F){var E=typeof G==="object"?G:{complete:F||!F&&H||o.isFunction(G)&&G,duration:G,easing:F&&H||H&&!o.isFunction(H)&&H};E.duration=o.fx.off?0:typeof E.duration==="number"?E.duration:o.fx.speeds[E.duration]||o.fx.speeds._default;E.old=E.complete;E.complete=function(){if(E.queue!==false){o(this).dequeue()}if(o.isFunction(E.old)){E.old.call(this)}};return E},easing:{linear:function(G,H,E,F){return E+F*G},swing:function(G,H,E,F){return((-Math.cos(G*Math.PI)/2)+0.5)*F+E}},timers:[],fx:function(F,E,G){this.options=E;this.elem=F;this.prop=G;if(!E.orig){E.orig={}}}});o.fx.prototype={update:function(){if(this.options.step){this.options.step.call(this.elem,this.now,this)}(o.fx.step[this.prop]||o.fx.step._default)(this);if((this.prop=="height"||this.prop=="width")&&this.elem.style){this.elem.style.display="block"}},cur:function(F){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null)){return this.elem[this.prop]}var E=parseFloat(o.css(this.elem,this.prop,F));return E&&E>-10000?E:parseFloat(o.curCSS(this.elem,this.prop))||0},custom:function(I,H,G){this.startTime=e();this.start=I;this.end=H;this.unit=G||this.unit||"px";this.now=this.start;this.pos=this.state=0;var E=this;function F(J){return E.step(J)}F.elem=this.elem;if(F()&&o.timers.push(F)&&!n){n=setInterval(function(){var K=o.timers;for(var J=0;J<K.length;J++){if(!K[J]()){K.splice(J--,1)}}if(!K.length){clearInterval(n);n=g}},13)}},show:function(){this.options.orig[this.prop]=o.attr(this.elem.style,this.prop);this.options.show=true;this.custom(this.prop=="width"||this.prop=="height"?1:0,this.cur());o(this.elem).show()},hide:function(){this.options.orig[this.prop]=o.attr(this.elem.style,this.prop);this.options.hide=true;this.custom(this.cur(),0)},step:function(H){var G=e();if(H||G>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var E=true;for(var F in this.options.curAnim){if(this.options.curAnim[F]!==true){E=false}}if(E){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(o.css(this.elem,"display")=="none"){this.elem.style.display="block"}}if(this.options.hide){o(this.elem).hide()}if(this.options.hide||this.options.show){for(var I in 
this.options.curAnim){o.attr(this.elem.style,I,this.options.orig[I])}}this.options.complete.call(this.elem)}return false}else{var J=G-this.startTime;this.state=J/this.options.duration;this.pos=o.easing[this.options.easing||(o.easing.swing?"swing":"linear")](this.state,J,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update()}return true}};o.extend(o.fx,{speeds:{slow:600,fast:200,_default:400},step:{opacity:function(E){o.attr(E.elem.style,"opacity",E.now)},_default:function(E){if(E.elem.style&&E.elem.style[E.prop]!=null){E.elem.style[E.prop]=E.now+E.unit}else{E.elem[E.prop]=E.now}}}});if(document.documentElement.getBoundingClientRect){o.fn.offset=function(){if(!this[0]){return{top:0,left:0}}if(this[0]===this[0].ownerDocument.body){return o.offset.bodyOffset(this[0])}var G=this[0].getBoundingClientRect(),J=this[0].ownerDocument,F=J.body,E=J.documentElement,L=E.clientTop||F.clientTop||0,K=E.clientLeft||F.clientLeft||0,I=G.top+(self.pageYOffset||o.boxModel&&E.scrollTop||F.scrollTop)-L,H=G.left+(self.pageXOffset||o.boxModel&&E.scrollLeft||F.scrollLeft)-K;return{top:I,left:H}}}else{o.fn.offset=function(){if(!this[0]){return{top:0,left:0}}if(this[0]===this[0].ownerDocument.body){return o.offset.bodyOffset(this[0])}o.offset.initialized||o.offset.initialize();var J=this[0],G=J.offsetParent,F=J,O=J.ownerDocument,M,H=O.documentElement,K=O.body,L=O.defaultView,E=L.getComputedStyle(J,null),N=J.offsetTop,I=J.offsetLeft;while((J=J.parentNode)&&J!==K&&J!==H){M=L.getComputedStyle(J,null);N-=J.scrollTop,I-=J.scrollLeft;if(J===G){N+=J.offsetTop,I+=J.offsetLeft;if(o.offset.doesNotAddBorder&&!(o.offset.doesAddBorderForTableAndCells&&/^t(able|d|h)$/i.test(J.tagName))){N+=parseInt(M.borderTopWidth,10)||0,I+=parseInt(M.borderLeftWidth,10)||0}F=G,G=J.offsetParent}if(o.offset.subtractsBorderForOverflowNotVisible&&M.overflow!=="visible"){N+=parseInt(M.borderTopWidth,10)||0,I+=parseInt(M.borderLeftWidth,10)||0}E=M}if(E.position==="relative"||E.position==="static"){N+=K.offsetTop,I+=K.offsetLeft}if(E.position==="fixed"){N+=Math.max(H.scrollTop,K.scrollTop),I+=Math.max(H.scrollLeft,K.scrollLeft)}return{top:N,left:I}}}o.offset={initialize:function(){if(this.initialized){return}var L=document.body,F=document.createElement("div"),H,G,N,I,M,E,J=L.style.marginTop,K='<div style="position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;"><div></div></div><table style="position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;" cellpadding="0" cellspacing="0"><tr><td></td></tr></table>';M={position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"};for(E in M){F.style[E]=M[E]}F.innerHTML=K;L.insertBefore(F,L.firstChild);H=F.firstChild,G=H.firstChild,I=H.nextSibling.firstChild.firstChild;this.doesNotAddBorder=(G.offsetTop!==5);this.doesAddBorderForTableAndCells=(I.offsetTop===5);H.style.overflow="hidden",H.style.position="relative";this.subtractsBorderForOverflowNotVisible=(G.offsetTop===-5);L.style.marginTop="1px";this.doesNotIncludeMarginInBodyOffset=(L.offsetTop===0);L.style.marginTop=J;L.removeChild(F);this.initialized=true},bodyOffset:function(E){o.offset.initialized||o.offset.initialize();var G=E.offsetTop,F=E.offsetLeft;if(o.offset.doesNotIncludeMarginInBodyOffset){G+=parseInt(o.curCSS(E,"marginTop",true),10)||0,F+=parseInt(o.curCSS(E,"marginLeft",true),10)||0}return{top:G,left:F}}};o.fn.extend({position:function(){var I=0,H=0,F;if(this[0]){var 
G=this.offsetParent(),J=this.offset(),E=/^body|html$/i.test(G[0].tagName)?{top:0,left:0}:G.offset();J.top-=j(this,"marginTop");J.left-=j(this,"marginLeft");E.top+=j(G,"borderTopWidth");E.left+=j(G,"borderLeftWidth");F={top:J.top-E.top,left:J.left-E.left}}return F},offsetParent:function(){var E=this[0].offsetParent||document.body;while(E&&(!/^body|html$/i.test(E.tagName)&&o.css(E,"position")=="static")){E=E.offsetParent}return o(E)}});o.each(["Left","Top"],function(F,E){var G="scroll"+E;o.fn[G]=function(H){if(!this[0]){return null}return H!==g?this.each(function(){this==l||this==document?l.scrollTo(!F?H:o(l).scrollLeft(),F?H:o(l).scrollTop()):this[G]=H}):this[0]==l||this[0]==document?self[F?"pageYOffset":"pageXOffset"]||o.boxModel&&document.documentElement[G]||document.body[G]:this[0][G]}});o.each(["Height","Width"],function(I,G){var E=I?"Left":"Top",H=I?"Right":"Bottom",F=G.toLowerCase();o.fn["inner"+G]=function(){return this[0]?o.css(this[0],F,false,"padding"):null};o.fn["outer"+G]=function(K){return this[0]?o.css(this[0],F,false,K?"margin":"border"):null};var J=G.toLowerCase();o.fn[J]=function(K){return this[0]==l?document.compatMode=="CSS1Compat"&&document.documentElement["client"+G]||document.body["client"+G]:this[0]==document?Math.max(document.documentElement["client"+G],document.body["scroll"+G],document.documentElement["scroll"+G],document.body["offset"+G],document.documentElement["offset"+G]):K===g?(this.length?o.css(this[0],J):null):this.css(J,typeof K==="string"?K:K+"px")}})})();
\ No newline at end of file
Binary file bundled/werkzeug/werkzeug/debug/shared/less.png has changed
Binary file bundled/werkzeug/werkzeug/debug/shared/more.png has changed
Binary file bundled/werkzeug/werkzeug/debug/shared/source.png has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/shared/style.css	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,93 @@
+body, input  { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+               'Verdana', sans-serif; background-color: #AFC1C4; color: #000;
+               text-align: center; margin: 1em; padding: 0; font-size: 15px; }
+input        { background-color: #fff; margin: 0; text-align: left; }
+a            { color: #11557C; }
+a:hover      { color: #177199; }
+pre, code, table.source,
+textarea     { font-family: 'Consolas', 'Deja Vu Sans Mono',
+               'Bitstream Vera Sans Mono', monospace; font-size: 13px; }
+
+div.debugger { text-align: left; padding: 12px; margin: auto;
+               border: 1px solid #aaa; background-color: white; }
+h1           { color: #11557C; font-size: 30px; margin: 0 0 0.3em 0; }
+div.detail p { margin: 0 0 8px 13px; font-size: 14px; }
+div.explanation { margin: 13px; font-size: 11px; }
+div.footer   { background-color: #E3EFF1; font-size: 0.8em; text-align: right;
+               padding: 6px 8px 6px 0; margin: 30px -12px -12px -12px;
+               color: #86989B; }
+
+h2           { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 5px;
+               background-color: #11557C; color: white; }
+h2 em        { font-style: normal; color: #A5D6D9; font-weight: normal; }
+
+div.traceback, div.plain { background-color: #eee!important; border: 1px solid #ccc;
+                           margin: 0 0 1em 0; padding: 10px; }
+div.plain p      { margin: 0; }
+div.plain textarea,
+div.plain pre { margin: 10px 0 0 0; padding: 4px; background-color: #333;
+                border: 1px solid #111; color: white; }
+div.plain textarea { width: 99%; height: 300px; }
+div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
+div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
+div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
+div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
+                    background-color: #ccc; border-top: 1px solid #aaa;
+                    border-left: 1px solid #aaa; border-right: 1px solid #fafafa;
+                    border-bottom: 1px solid #fafafa; }
+div.traceback pre,
+div.box table.source { white-space: pre-wrap;       /* css-3 should we be so lucky... */
+                       white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
+                       white-space: -pre-wrap;      /* Opera 4-6 ?? */
+                       white-space: -o-pre-wrap;    /* Opera 7 ?? */
+                       word-wrap: break-word;       /* Internet Explorer 5.5+ */
+                       _white-space: pre;           /* IE only hack to re-specify in
+                                                    addition to word-wrap  */ }
+div.traceback pre:hover { background-color: #fafafa; color: black; cursor: pointer; }
+div.traceback blockquote { margin: 1em 0 0 0; padding: 0; }
+div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
+div.traceback img:hover { background-color: #ddd; cursor: pointer; }
+div.traceback pre:hover img { display: block; }
+
+pre.console { background-color: #fafafa!important; color: black; padding: 5px!important;
+              margin: 3px 0 0 0!important; cursor: default!important;
+              max-height: 400px; overflow: auto; }
+pre.console form { color: #555; }
+pre.console input { background-color: #fafafa; color: #555;
+                    width: 90%; font-family: 'Consolas', 'Deja Vu Sans Mono',
+                    'Bitstream Vera Sans Mono', monospace; font-size: 13px;
+                     border: none!important; }
+
+span.string { color: #30799B; }
+span.number { color: #9C1A1C; }
+span.object { color: #485F6E; }
+span.extended { opacity: 0.5; }
+span.extended:hover { opacity: 1; }
+a.toggle { text-decoration: none; background-repeat: no-repeat;
+           background-position: center center;
+           background-image: url(./__debugger__?cmd=resource&f=more.png); }
+a.toggle:hover { background-color: #444; }
+a.open { background-image: url(./__debugger__?cmd=resource&f=less.png); }
+
+pre.console div.traceback { margin: 5px 0 5px 25px; white-space: normal; }
+pre.console div.traceback h3 { background-color: #555; color: white;
+                               margin: -10px -10px 5px -10px; padding: 5px; }
+pre.console div.traceback pre:hover { background-color: #ccc; cursor: default; }
+
+pre.console div.box { margin: 5px 0 5px 25px; white-space: normal;
+                      border: 1px solid #ddd; }
+pre.console div.box h3 { background-color: #555; color: white;
+                         margin: 0; padding: 5px; }
+pre.console div.box div.repr { padding: 8px; background-color: white; }
+pre.console div.box table { margin-top: 6px; }
+pre.console div.box pre.help { background-color: white; font-size: 12px; }
+pre.console div.box pre.help:hover { cursor: default; }
+pre.console table tr { vertical-align: top; }
+div.box table.source { background-color: #fafafa; font-size: 12px;
+                       border-collapse: collapse; width: 100%; }
+div.box table.source td { border-top: 1px solid #eee; padding: 4px 0 4px 10px; }
+div.box table.source td.lineno { color: #999; padding-right: 10px; width: 1px; }
+div.box table.source tr.in-frame { background-color: #D6EEFF; }
+div.box table.source tr.current { background-color: white; }
+div.sourceview { max-height: 400px; overflow: auto; border: 1px solid #ccc; }
+div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/shared/vartable.tmpl	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,16 @@
+<table class="vars">
+<% if type == 'empty' %>
+  <tr><th>no data given</th></tr>
+<% elif type == 'simple' %>
+  <tr><td class="value">$escape(value)</td></tr>
+<% elif type == 'dict' %>
+  <tr><th>Name</th><th>Value</th></tr>
+  <% for key, item in value %>
+  <tr><td class="name">$escape(key)</td><td class="value">$escape(item)</td></tr>
+  <% endfor %>
+<% elif type == 'list' %>
+  <% for item in value %>
+  <tr><td class="value">$escape(item)</td></tr>
+  <% endfor %>
+<% endif %>
+</table>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/tbtools.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.tbtools
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides various traceback related utility functions.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD.
+"""
+import re
+import os
+import sys
+import inspect
+import traceback
+import codecs
+from tokenize import TokenError
+from werkzeug.utils import cached_property
+from werkzeug.debug.console import Console
+from werkzeug.debug.utils import render_template
+
+_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
+_line_re = re.compile(r'^(.*?)$(?m)')
+_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
+UTF8_COOKIE = '\xef\xbb\xbf'
+
+system_exceptions = (SystemExit, KeyboardInterrupt)
+try:
+    system_exceptions += (GeneratorExit,)
+except NameError:
+    pass
+
+
+def get_current_traceback(ignore_system_exceptions=False,
+                          show_hidden_frames=False, skip=0):
+    """Get the current exception info as `Traceback` object.  Per default
+    calling this method will reraise system exceptions such as generator exit,
+    system exit or others.  This behavior can be disabled by passing `False`
+    to the function as first parameter.
+    """
+    exc_type, exc_value, tb = sys.exc_info()
+    if ignore_system_exceptions and exc_type in system_exceptions:
+        raise
+    for x in xrange(skip):
+        if tb.tb_next is None:
+            break
+        tb = tb.tb_next
+    tb = Traceback(exc_type, exc_value, tb)
+    if not show_hidden_frames:
+        tb.filter_hidden_frames()
+    return tb
+
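+# A minimal usage sketch (illustrative only, not part of the module): inside
+# an ``except`` block you would typically do something like
+#
+#     tb = get_current_traceback(skip=1, show_hidden_frames=False)
+#     tb.log(sys.stderr)           # plain-text traceback to a file object
+#     html = tb.render_full()      # full debugger page as unicode
+#
+# which is roughly how the debugger middleware in werkzeug.debug consumes
+# this helper.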
+
+class Line(object):
+    """Helper for the source renderer."""
+    __slots__ = ('lineno', 'code', 'in_frame', 'current')
+
+    def __init__(self, lineno, code):
+        self.lineno = lineno
+        self.code = code
+        self.in_frame = False
+        self.current = False
+
+    def classes(self):
+        rv = ['line']
+        if self.in_frame:
+            rv.append('in-frame')
+        if self.current:
+            rv.append('current')
+        return rv
+    classes = property(classes)
+
+
+class Traceback(object):
+    """Wraps a traceback."""
+
+    def __init__(self, exc_type, exc_value, tb):
+        self.exc_type = exc_type
+        self.exc_value = exc_value
+        if not isinstance(exc_type, str):
+            exception_type = exc_type.__name__
+            if exc_type.__module__ not in ('__builtin__', 'exceptions'):
+                exception_type = exc_type.__module__ + '.' + exception_type
+        else:
+            exception_type = exc_type
+        self.exception_type = exception_type
+
+        # collect the frames here; hidden frames are filtered out later
+        # following the magic variables defined by paste.exceptions.collector
+        self.frames = []
+        while tb:
+            self.frames.append(Frame(exc_type, exc_value, tb))
+            tb = tb.tb_next
+
+    def filter_hidden_frames(self):
+        """Remove the frames according to the paste spec."""
+        new_frames = []
+        hidden = False
+        for frame in self.frames:
+            hide = frame.hide
+            if hide in ('before', 'before_and_this'):
+                new_frames = []
+                hidden = False
+                if hide == 'before_and_this':
+                    continue
+            elif hide in ('reset', 'reset_and_this'):
+                hidden = False
+                if hide == 'reset_and_this':
+                    continue
+            elif hide in ('after', 'after_and_this'):
+                hidden = True
+                if hide == 'after_and_this':
+                    continue
+            elif hide or hidden:
+                continue
+            new_frames.append(frame)
+
+        # if the last frame is missing something went terribly wrong :(
+        if self.frames[-1] in new_frames:
+            self.frames[:] = new_frames
+
+    def is_syntax_error(self):
+        """Is it a syntax error?"""
+        return isinstance(self.exc_value, SyntaxError)
+    is_syntax_error = property(is_syntax_error)
+
+    def exception(self):
+        """String representation of the exception."""
+        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
+        return ''.join(buf).strip().decode('utf-8', 'replace')
+    exception = property(exception)
+
+    def log(self, logfile=None):
+        """Log the ASCII traceback into a file object."""
+        if logfile is None:
+            logfile = sys.stderr
+        tb = self.plaintext.encode('utf-8', 'replace').rstrip() + '\n'
+        logfile.write(tb)
+
+    def paste(self):
+        """Create a paste and return the paste id."""
+        from xmlrpclib import ServerProxy
+        srv = ServerProxy('http://paste.pocoo.org/xmlrpc/')
+        return srv.pastes.newPaste('pytb', self.plaintext)
+
+    def render_summary(self, include_title=True):
+        """Render the traceback for the interactive console."""
+        return render_template('traceback_summary.html', traceback=self,
+                               include_title=include_title)
+
+    def render_full(self, evalex=False):
+        """Render the Full HTML page with the traceback info."""
+        return render_template('traceback_full.html', traceback=self,
+                               evalex=evalex)
+
+    def plaintext(self):
+        return render_template('traceback_plaintext.html', traceback=self)
+    plaintext = cached_property(plaintext)
+
+    id = property(lambda x: id(x))
+
+
+class Frame(object):
+    """A single frame in a traceback."""
+
+    def __init__(self, exc_type, exc_value, tb):
+        self.lineno = tb.tb_lineno
+        self.function_name = tb.tb_frame.f_code.co_name
+        self.locals = tb.tb_frame.f_locals
+        self.globals = tb.tb_frame.f_globals
+
+        fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
+        if fn[-4:] in ('.pyo', '.pyc'):
+            fn = fn[:-1]
+        # if it's a file on the file system resolve the real filename.
+        if os.path.isfile(fn):
+            fn = os.path.realpath(fn)
+        self.filename = fn
+        self.module = self.globals.get('__name__')
+        self.loader = self.globals.get('__loader__')
+        self.code = tb.tb_frame.f_code
+
+        # support for paste's traceback extensions
+        self.hide = self.locals.get('__traceback_hide__', False)
+        info = self.locals.get('__traceback_info__')
+        if info is not None:
+            try:
+                info = unicode(info)
+            except UnicodeError:
+                info = str(info).decode('utf-8', 'replace')
+        self.info = info
+
+    def render(self):
+        """Render a single frame in a traceback."""
+        return render_template('frame.html', frame=self)
+
+    def render_source(self):
+        """Render the sourcecode."""
+        lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
+
+        # find function definition and mark lines
+        if hasattr(self.code, 'co_firstlineno'):
+            lineno = self.code.co_firstlineno - 1
+            while lineno > 0:
+                if _funcdef_re.match(lines[lineno].code):
+                    break
+                lineno -= 1
+            try:
+                offset = len(inspect.getblock([x.code + '\n' for x
+                                               in lines[lineno:]]))
+            except TokenError:
+                offset = 0
+            for line in lines[lineno:lineno + offset]:
+                line.in_frame = True
+
+        # mark current line
+        try:
+            lines[self.lineno - 1].current = True
+        except IndexError:
+            pass
+
+        return render_template('source.html', frame=self, lines=lines)
+
+    def eval(self, code, mode='single'):
+        """Evaluate code in the context of the frame."""
+        if isinstance(code, basestring):
+            if isinstance(code, unicode):
+                code = UTF8_COOKIE + code.encode('utf-8')
+            code = compile(code, '<interactive>', mode)
+        if mode != 'exec':
+            return eval(code, self.globals, self.locals)
+        exec code in self.globals, self.locals
+
+    @cached_property
+    def sourcelines(self):
+        """The sourcecode of the file as list of unicode strings."""
+        # get sourcecode from loader or file
+        source = None
+        if self.loader is not None:
+            try:
+                if hasattr(self.loader, 'get_source'):
+                    source = self.loader.get_source(self.module)
+                elif hasattr(self.loader, 'get_source_by_code'):
+                    source = self.loader.get_source_by_code(self.code)
+            except:
+                # we munch the exception so that we don't cause troubles
+                # if the loader is broken.
+                pass
+
+        if source is None:
+            try:
+                f = file(self.filename)
+            except IOError:
+                return []
+            try:
+                source = f.read()
+            finally:
+                f.close()
+
+        # already unicode?  return right away
+        if isinstance(source, unicode):
+            return source.splitlines()
+
+        # not unicode yet.  It should be ascii, but we don't want to reject
+        # too many characters in the debugger if something breaks
+        charset = 'utf-8'
+        if source.startswith(UTF8_COOKIE):
+            source = source[3:]
+        else:
+            for idx, match in enumerate(_line_re.finditer(source)):
+                match = _coding_re.search(match.group())
+                if match is not None:
+                    charset = match.group(1)
+                    break
+                if idx > 1:
+                    break
+
+        # on broken cookies we fall back to utf-8 too
+        try:
+            codecs.lookup(charset)
+        except LookupError:
+            charset = 'utf-8'
+
+        return source.decode(charset, 'replace').splitlines()
+
+    @property
+    def current_line(self):
+        try:
+            return self.sourcelines[self.lineno - 1]
+        except IndexError:
+            return u''
+
+    @cached_property
+    def console(self):
+        return Console(self.globals, self.locals)
+
+    id = property(lambda x: id(x))
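+# Illustrative sketch (not part of the module): given a ``frame`` from
+# ``Traceback.frames`` the debugger can inspect it interactively, e.g.
+#
+#     frame.eval('sorted(locals())')       # evaluate in the frame's namespace
+#     frame.console.eval('request.args')   # the per-frame interactive console
+#
+# ``request`` here is hypothetical; whatever names existed in the failing
+# frame are available.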
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/console.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,28 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+  "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+  <head>
+    <title>Console // Werkzeug Debugger</title>
+    <link rel="stylesheet" href="./__debugger__?cmd=resource&amp;f=style.css" type="text/css">
+    <script type="text/javascript" src="./__debugger__?cmd=resource&amp;f=jquery.js"></script>
+    <script type="text/javascript" src="./__debugger__?cmd=resource&amp;f=debugger.js"></script>
+    <script type="text/javascript">
+      var EVALEX = true,
+          CONSOLE_MODE = true;
+    </script>
+  </head>
+  <body>
+    <div class="debugger">
+      <h1>Interactive Console</h1>
+      <div class="explanation">
+        In this console you can execute Python expressions in the context of the
+        application.  The initial namespace was created by the debugger automatically.
+      </div>
+      <div class="console"><div class="inner">The Console requires JavaScript.</div></div>
+      <div class="footer">
+        Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
+        friendly Werkzeug powered traceback interpreter.
+      </div>
+    </div>
+  </body>
+</html>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/dump_object.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,14 @@
+<div class="box">
+  <h3>$escape(title)</h3>
+  <% if repr %>
+    <div class="repr">$repr</div>
+  <% endif %>
+  <table>
+  <% for key, value in items %>
+    <tr>
+      <th>$escape(key)</th>
+      <td>$value</td>
+    </tr>
+  <% endfor %>
+  </table>
+</div>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/frame.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,6 @@
+<div class="frame" id="frame-$frame.id">
+  <h4>File <cite class="filename">"$escape(frame.filename)"</cite>,
+      line <em class="line">$frame.lineno</em>,
+      in <code class="function">$escape(frame.function_name)</code></h4>
+  <pre>${escape(frame.current_line.strip())}</pre>
+</div>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/help_command.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,10 @@
+<%py missing = object() %>
+<div class="box">
+  <% if title and text %>
+    <h3>$title</h3>
+    <pre class="help">$text</pre>
+  <% else %>
+    <h3>Help</h3>
+    <p>Type help(object) for help about object.</p>
+  <% endif %>
+</div>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/source.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,8 @@
+<table class="source">
+<% for line in lines %>
+  <tr class="${' '.join(line.classes)}">
+    <td class="lineno">${line.lineno}</td>
+    <td>$escape(line.code)</td>
+  </tr>
+<% endfor %>
+</table>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/traceback_full.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+  "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+  <head>
+    <title>$escape(traceback.exception) // Werkzeug Debugger</title>
+    <link rel="stylesheet" href="./__debugger__?cmd=resource&amp;f=style.css" type="text/css">
+    <script type="text/javascript" src="./__debugger__?cmd=resource&amp;f=jquery.js"></script>
+    <script type="text/javascript" src="./__debugger__?cmd=resource&amp;f=debugger.js"></script>
+    <script type="text/javascript">
+      var TRACEBACK = $traceback.id,
+          CONSOLE_MODE = false,
+          EVALEX = ${evalex and 'true' or 'false'};
+    </script>
+  </head>
+  <body>
+    <div class="debugger">
+      <h1>$escape(traceback.exception_type)</h1>
+      <div class="detail">
+        <p class="errormsg">$escape(traceback.exception)</p>
+      </div>
+      <h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
+      $traceback.render_summary(include_title=False)
+      <div class="plain">
+        <form action="http://paste.pocoo.org/" method="post">
+          <p>
+            <input type="hidden" name="language" value="pytb">
+            This is the Copy/Paste friendly version of the traceback.  <span
+            class="pastemessage">You can also paste this traceback into the public
+            lodgeit pastebin: <input type="submit" value="create paste"></span>
+          </p>
+          <textarea cols="50" rows="10" name="code" readonly>$escape(traceback.plaintext)</textarea>
+        </form>
+      </div>
+      <div class="explanation">
+        The debugger caught an exception in your WSGI application.  You can now
+        look at the traceback which led to the error.  <span class="nojavascript">
+        If you enable JavaScript you can also use additional features such as code
+        execution (if the evalex feature is enabled), automatic pasting of the
+        exceptions and much more.</span>
+      </div>
+      <div class="footer">
+        Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
+        friendly Werkzeug powered traceback interpreter.
+      </div>
+    </div>
+  </body>
+</html>
+<!--
+
+<%py
+  import re
+  print re.sub('-{2,}', '-', traceback.plaintext)
+%>
+
+-->
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/traceback_plaintext.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,6 @@
+Traceback (most recent call last):
+<% for frame in traceback.frames %>
+  File "$frame.filename", line $frame.lineno, in $frame.function_name
+    $frame.current_line.strip()
+<% endfor %>
+$traceback.exception
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/templates/traceback_summary.html	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,23 @@
+<div class="traceback">
+  <% if traceback.is_syntax_error %>
+    <% if include_title %>
+      <h3>Syntax Error</h3>
+    <% endif %>
+    <ul>
+    <% for frame in traceback.frames %>
+      <li>$frame.render()</li>
+    <% endfor %>
+    </ul>
+    <pre>$escape(traceback.exception)</pre>
+  <% else %>
+    <% if include_title %>
+      <h3>Traceback <em>(most recent call last)</em>:</h3>
+    <% endif %>
+    <ul>
+    <% for frame in traceback.frames %>
+      <li<% if frame.info %> title="$escape(frame.info, True)"<% endif %>>$frame.render()</li>
+    <% endfor %>
+    </ul>
+    <blockquote>$escape(traceback.exception)</blockquote>
+  <% endif %>
+</div>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/debug/utils.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.utils
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Various other utilities.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD.
+"""
+from os.path import join, dirname
+from werkzeug.templates import Template
+
+
+def get_template(filename):
+    return Template.from_file(join(dirname(__file__), 'templates', filename))
+
+
+def render_template(template_filename, **context):
+    return get_template(template_filename).render(**context)
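+# Illustrative sketch (not part of the module): the debugger templates above
+# are rendered through this helper, e.g.
+#
+#     html = render_template('traceback_summary.html', traceback=tb,
+#                            include_title=True)
+#
+# where ``tb`` is a ``Traceback`` instance from werkzeug.debug.tbtools.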
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/exceptions.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,459 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.exceptions
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module implements a number of Python exceptions you can raise from
+    within your views to trigger a standard non-200 response.
+
+
+    Usage Example
+    -------------
+
+    ::
+
+        from werkzeug import BaseRequest, responder
+        from werkzeug.exceptions import HTTPException, NotFound
+
+        def view(request):
+            raise NotFound()
+
+        @responder
+        def application(environ, start_response):
+            request = BaseRequest(environ)
+            try:
+                return view(request)
+            except HTTPException, e:
+                return e
+
+
+    As you can see from this example those exceptions are callable WSGI
+    applications.  Because of Python 2.4 compatibility those do not extend
+    from the response objects but only from the python exception class.
+
+    As a matter of fact they are not Werkzeug response objects.  However you
+    can get a response object by calling ``get_response()`` on an HTTP
+    exception.
+
+    Keep in mind that you have to pass an environment to ``get_response()``
+    because some errors fetch additional information from the WSGI
+    environment.
+
+    If you want to hook in a different exception page for, say, a 404 status
+    code, you can add a second except clause for that specific error::
+
+        @responder
+        def application(environ, start_response):
+            request = BaseRequest(environ)
+            try:
+                return view(request)
+            except NotFound, e:
+                return not_found(request)
+            except HTTPException, e:
+                return e
+
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+from werkzeug._internal import HTTP_STATUS_CODES, _get_environ
+
+
+class HTTPException(Exception):
+    """
+    Baseclass for all HTTP exceptions.  This exception can be called as a
+    WSGI application to render a default error page, or you can catch its
+    subclasses independently and render nicer error messages.
+    """
+
+    code = None
+    description = None
+
+    def __init__(self, description=None):
+        Exception.__init__(self, '%d %s' % (self.code, self.name))
+        if description is not None:
+            self.description = description
+
+    @classmethod
+    def wrap(cls, exception, name=None):
+        """This method returns a new subclass of the exception provided that
+        also is a subclass of `BadRequest`.
+        """
+        class newcls(cls, exception):
+            def __init__(self, arg=None, description=None):
+                cls.__init__(self, description)
+                exception.__init__(self, arg)
+        newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
+        newcls.__name__ = name or cls.__name__ + exception.__name__
+        return newcls
+
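+    # Illustrative sketch (not part of the class): ``wrap`` is how the
+    # module builds ``HTTPUnicodeError`` below, and you could likewise do
+    #
+    #     KeyNotFound = NotFound.wrap(KeyError)   # hypothetical subclass
+    #     raise KeyNotFound('missing_key')
+    #
+    # so the same object is both a ``KeyError`` and a 404 response.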
+    @property
+    def name(self):
+        """The status name."""
+        return HTTP_STATUS_CODES[self.code]
+
+    def get_description(self, environ):
+        """Get the description."""
+        environ = _get_environ(environ)
+        return self.description
+
+    def get_body(self, environ):
+        """Get the HTML body."""
+        return (
+            '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
+            '<title>%(code)s %(name)s</title>\n'
+            '<h1>%(name)s</h1>\n'
+            '%(description)s\n'
+        ) % {
+            'code':         self.code,
+            'name':         escape(self.name),
+            'description':  self.get_description(environ)
+        }
+
+    def get_headers(self, environ):
+        """Get a list of headers."""
+        return [('Content-Type', 'text/html')]
+
+    def get_response(self, environ):
+        """Get a response object.
+
+        :param environ: the environ for the request.
+        :return: a :class:`BaseResponse` object or a subclass thereof.
+        """
+        # lazily imported for various reasons.  For one, we can use the exceptions
+        # with custom responses (testing exception instances against types) and
+        # so we don't ever have to import the wrappers, but also because there
+        # are circular dependencies when bootstrapping the module.
+        environ = _get_environ(environ)
+        from werkzeug.wrappers import BaseResponse
+        headers = self.get_headers(environ)
+        return BaseResponse(self.get_body(environ), self.code, headers)
+
+    def __call__(self, environ, start_response):
+        """Call the exception as WSGI application.
+
+        :param environ: the WSGI environment.
+        :param start_response: the response callable provided by the WSGI
+                               server.
+        """
+        response = self.get_response(environ)
+        return response(environ, start_response)
+
+    def __str__(self):
+        return unicode(self).encode('utf-8')
+
+    def __unicode__(self):
+        if 'description' in self.__dict__:
+            txt = self.description
+        else:
+            txt = self.name
+        return '%d: %s' % (self.code, txt)
+
+    def __repr__(self):
+        return '<%s \'%s\'>' % (self.__class__.__name__, self)
+
+
+class _ProxyException(HTTPException):
+    """An HTTP exception that expands renders a WSGI application on error."""
+
+    def __init__(self, response):
+        Exception.__init__(self, 'proxy exception for %r' % response)
+        self.response = response
+
+    def get_response(self, environ):
+        return self.response
+
+
+class BadRequest(HTTPException):
+    """*400* `Bad Request`
+
+    Raise if the browser sends something to the application the application
+    or server cannot handle.
+    """
+    code = 400
+    description = (
+        '<p>The browser (or proxy) sent a request that this server could '
+        'not understand.</p>'
+    )
+
+
+class Unauthorized(HTTPException):
+    """*401* `Unauthorized`
+
+    Raise if the user is not authorized.  Also used if you want to use HTTP
+    basic auth.
+    """
+    code = 401
+    description = (
+        '<p>The server could not verify that you are authorized to access '
+        'the URL requested.  You either supplied the wrong credentials (e.g. '
+        'a bad password), or your browser doesn\'t understand how to supply '
+        'the credentials required.</p><p>In case you are allowed to request '
+        'the document, please check your user-id and password and try '
+        'again.</p>'
+    )
+
+
+class Forbidden(HTTPException):
+    """*403* `Forbidden`
+
+    Raise if the user doesn't have the permission for the requested resource
+    but was authenticated.
+    """
+    code = 403
+    description = (
+        '<p>You don\'t have the permission to access the requested resource. '
+        'It is either read-protected or not readable by the server.</p>'
+    )
+
+
+class NotFound(HTTPException):
+    """*404* `Not Found`
+
+    Raise if a resource does not exist and never existed.
+    """
+    code = 404
+    description = (
+        '<p>The requested URL was not found on the server.</p>'
+        '<p>If you entered the URL manually please check your spelling and '
+        'try again.</p>'
+    )
+
+
+class MethodNotAllowed(HTTPException):
+    """*405* `Method Not Allowed`
+
+    Raise if the client used a method the resource does not handle.  For
+    example `POST` if the resource is view only.  Especially useful for REST.
+
+    The first argument for this exception should be a list of allowed methods.
+    Strictly speaking the response would be invalid without an `Allow` header
+    listing the valid methods, which is built from that list.
+    """
+    code = 405
+
+    def __init__(self, valid_methods=None, description=None):
+        """Takes an optional list of valid http methods
+        starting with werkzeug 0.3 the list will be mandatory."""
+        HTTPException.__init__(self, description)
+        self.valid_methods = valid_methods
+
+    def get_headers(self, environ):
+        headers = HTTPException.get_headers(self, environ)
+        if self.valid_methods:
+            headers.append(('Allow', ', '.join(self.valid_methods)))
+        return headers
+
+    def get_description(self, environ):
+        m = escape(environ.get('REQUEST_METHOD', 'GET'))
+        return '<p>The method %s is not allowed for the requested URL.</p>' % m
+
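+# Illustrative sketch (not part of the module): a view that only supports
+# GET could reject other verbs with
+#
+#     raise MethodNotAllowed(['GET', 'HEAD'])
+#
+# which adds an ``Allow: GET, HEAD`` header to the generated 405 response.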
+
+class NotAcceptable(HTTPException):
+    """*406* `Not Acceptable`
+
+    Raise if the server can't return any content conforming to the
+    `Accept` headers of the client.
+    """
+    code = 406
+
+    description = (
+        '<p>The resource identified by the request is only capable of '
+        'generating response entities which have content characteristics '
+        'not acceptable according to the accept headers sent in the '
+        'request.</p>'
+        )
+
+
+class RequestTimeout(HTTPException):
+    """*408* `Request Timeout`
+
+    Raise to signal a timeout.
+    """
+    code = 408
+    description = (
+        '<p>The server closed the network connection because the browser '
+        'didn\'t finish the request within the specified time.</p>'
+    )
+
+
+class Gone(HTTPException):
+    """*410* `Gone`
+
+    Raise if a resource existed previously and went away without a new location.
+    """
+    code = 410
+    description = (
+        '<p>The requested URL is no longer available on this server and '
+        'there is no forwarding address.</p><p>If you followed a link '
+        'from a foreign page, please contact the author of this page.</p>'
+    )
+
+
+class LengthRequired(HTTPException):
+    """*411* `Length Required`
+
+    Raise if the browser submitted data but no ``Content-Length`` header which
+    is required for the kind of processing the server does.
+    """
+    code = 411
+    description = (
+        '<p>A request with this method requires a valid <code>Content-'
+        'Length</code> header.</p>'
+    )
+
+
+class PreconditionFailed(HTTPException):
+    """*412* `Precondition Failed`
+
+    Status code used in combination with ``If-Match``, ``If-None-Match``, or
+    ``If-Unmodified-Since``.
+    """
+    code = 412
+    description = (
+        '<p>The precondition on the request for the URL failed positive '
+        'evaluation.</p>'
+    )
+
+
+class RequestEntityTooLarge(HTTPException):
+    """*413* `Request Entity Too Large`
+
+    The status code one should return if the data submitted exceeded a given
+    limit.
+    """
+    code = 413
+    description = (
+        '<p>The data value transmitted exceeds the capacity limit.</p>'
+    )
+
+
+class RequestURITooLarge(HTTPException):
+    """*414* `Request URI Too Large`
+
+    Like *413* but for too long URLs.
+    """
+    code = 414
+    description = (
+        '<p>The length of the requested URL exceeds the capacity limit '
+        'for this server.  The request cannot be processed.</p>'
+    )
+
+
+class UnsupportedMediaType(HTTPException):
+    """*415* `Unsupported Media Type`
+
+    The status code returned if the server is unable to handle the media type
+    the client transmitted.
+    """
+    code = 415
+    description = (
+        '<p>The server does not support the media type transmitted in '
+        'the request.</p>'
+    )
+
+
+class InternalServerError(HTTPException):
+    """*500* `Internal Server Error`
+
+    Raise if an internal server error occurred.  This is a good fallback if an
+    unknown error occurred in the dispatcher.
+    """
+    code = 500
+    description = (
+        '<p>The server encountered an internal error and was unable to '
+        'complete your request.  Either the server is overloaded or there '
+        'is an error in the application.</p>'
+    )
+
+
+class NotImplemented(HTTPException):
+    """*501* `Not Implemented`
+
+    Raise if the application does not support the action requested by the
+    browser.
+    """
+    code = 501
+    description = (
+        '<p>The server does not support the action requested by the '
+        'browser.</p>'
+    )
+
+
+class BadGateway(HTTPException):
+    """*502* `Bad Gateway`
+
+    If you do proxying in your application you should return this status code
+    if you received an invalid response from the upstream server it accessed
+    in attempting to fulfill the request.
+    """
+    code = 502
+    description = (
+        '<p>The proxy server received an invalid response from an upstream '
+        'server.</p>'
+    )
+
+
+class ServiceUnavailable(HTTPException):
+    """*503* `Service Unavailable`
+
+    Status code you should return if a service is temporarily unavailable.
+    """
+    code = 503
+    description = (
+        '<p>The server is temporarily unable to service your request due to '
+        'maintenance downtime or capacity problems.  Please try again '
+        'later.</p>'
+    )
+
+
+default_exceptions = {}
+__all__ = ['HTTPException']
+
+def _find_exceptions():
+    for name, obj in globals().iteritems():
+        try:
+            if getattr(obj, 'code', None) is not None:
+                default_exceptions[obj.code] = obj
+                __all__.append(obj.__name__)
+        except TypeError: # pragma: no cover
+            continue
+_find_exceptions()
+del _find_exceptions
+
+
+#: raised by the request functions if they were unable to decode the
+#: incoming data properly.
+HTTPUnicodeError = BadRequest.wrap(UnicodeError, 'HTTPUnicodeError')
+
+
+class Aborter(object):
+    """
+    When passed a dict of code -> exception items it can be used as a
+    callable that raises exceptions.  If the first argument to the
+    callable is an integer it will be looked up in the mapping; if it's
+    a WSGI application it will be wrapped in a proxy exception and raised.
+
+    The rest of the arguments are forwarded to the exception constructor.
+    """
+
+    def __init__(self, mapping=None, extra=None):
+        if mapping is None:
+            mapping = default_exceptions
+        self.mapping = dict(mapping)
+        if extra is not None:
+            self.mapping.update(extra)
+
+    def __call__(self, code, *args, **kwargs):
+        if not args and not kwargs and not isinstance(code, (int, long)):
+            raise _ProxyException(code)
+        if code not in self.mapping:
+            raise LookupError('no exception for %r' % code)
+        raise self.mapping[code](*args, **kwargs)
+
+abort = Aborter()
+
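+# Illustrative sketch (not part of the module): the ``abort`` helper raises
+# the exception registered for a status code, e.g.
+#
+#     abort(404)                       # raises NotFound
+#     abort(401, description='<p>Login required.</p>')
+#
+# Passing a response object instead of an integer raises a proxy exception
+# that simply returns that response.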
+
+# imported here because of circular dependencies of werkzeug.utils
+from werkzeug.utils import escape
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/formparser.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.formparser
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module implements form parsing.  It supports url-encoded forms
+    as well as non-nested multipart uploads.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+from cStringIO import StringIO
+from tempfile import TemporaryFile
+from itertools import chain, repeat
+
+from werkzeug._internal import _decode_unicode, _empty_stream
+from werkzeug.urls import url_decode
+from werkzeug.wsgi import LimitedStream, make_line_iter
+from werkzeug.exceptions import RequestEntityTooLarge
+from werkzeug.datastructures import Headers, FileStorage, MultiDict
+from werkzeug.http import parse_options_header
+
+
+#: an iterator that yields empty strings
+_empty_string_iter = repeat('')
+
+#: a regular expression for multipart boundaries
+_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
+
+#: HTTP transfer encodings we support for multipart messages; they are
+#: also available as Python codecs.
+_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
+
+
+def default_stream_factory(total_content_length, content_type, filename,
+                           content_length=None):
+    """The stream factory that is used per default."""
+    if total_content_length > 1024 * 500:
+        return TemporaryFile('wb+')
+    return StringIO()
+
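+# Illustrative sketch (not part of the module): ``parse_form_data`` below
+# accepts a custom ``stream_factory`` with the same signature, for example
+# one that always spools uploads to disk:
+#
+#     def disk_stream_factory(total_content_length, content_type, filename,
+#                             content_length=None):
+#         return TemporaryFile('wb+')
+#
+# ``disk_stream_factory`` is a hypothetical name used only for illustration.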
+
+def parse_form_data(environ, stream_factory=None, charset='utf-8',
+                    errors='ignore', max_form_memory_size=None,
+                    max_content_length=None, cls=None,
+                    silent=True):
+    """Parse the form data in the environ and return it as tuple in the form
+    ``(stream, form, files)``.  You should only call this method if the
+    transport method is `POST` or `PUT`.
+
+    If the mimetype of the data transmitted is `multipart/form-data` the
+    files multidict will be filled with `FileStorage` objects.  If the
+    mimetype is unknown the input stream is wrapped and returned as first
+    argument, else the stream is empty.
+
+    By default this function does not raise exceptions for malformed input
+    data; pass ``silent=False`` to have parsing errors propagated.
+
+    Have a look at :ref:`dealing-with-request-data` for more details.
+
+    .. versionadded:: 0.5
+       The `max_form_memory_size`, `max_content_length` and
+       `cls` parameters were added.
+
+    .. versionadded:: 0.5.1
+       The optional `silent` flag was added.
+
+    :param environ: the WSGI environment to be used for parsing.
+    :param stream_factory: An optional callable that returns a new read and
+                           writeable file descriptor.  This callable works
+                           the same as :meth:`~BaseRequest._get_file_stream`.
+    :param charset: The character set for URL and url encoded form data.
+    :param errors: The encoding error behavior.
+    :param max_form_memory_size: the maximum number of bytes to be accepted for
+                           in-memory stored form data.  If the data
+                           exceeds the value specified an
+                           :exc:`~exceptions.RequestEntityTooLarge`
+                           exception is raised.
+    :param max_content_length: If this is provided and the transmitted data
+                               is longer than this value an
+                               :exc:`~exceptions.RequestEntityTooLarge`
+                               exception is raised.
+    :param cls: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
+    :param silent: If set to False parsing errors will not be caught.
+    :return: A tuple in the form ``(stream, form, files)``.
+    """
+    content_type, extra = parse_options_header(environ.get('CONTENT_TYPE', ''))
+    try:
+        content_length = int(environ['CONTENT_LENGTH'])
+    except (KeyError, ValueError):
+        content_length = 0
+
+    if cls is None:
+        cls = MultiDict
+
+    if max_content_length is not None and content_length > max_content_length:
+        raise RequestEntityTooLarge()
+
+    stream = _empty_stream
+    files = ()
+
+    if content_type == 'multipart/form-data':
+        try:
+            form, files = parse_multipart(environ['wsgi.input'],
+                                          extra.get('boundary'),
+                                          content_length, stream_factory,
+                                          charset, errors,
+                                          max_form_memory_size=max_form_memory_size)
+        except ValueError, e:
+            if not silent:
+                raise
+            form = cls()
+        else:
+            form = cls(form)
+    elif content_type == 'application/x-www-form-urlencoded' or \
+         content_type == 'application/x-url-encoded':
+        if max_form_memory_size is not None and \
+           content_length > max_form_memory_size:
+            raise RequestEntityTooLarge()
+        form = url_decode(environ['wsgi.input'].read(content_length),
+                          charset, errors=errors, cls=cls)
+    else:
+        form = cls()
+        stream = LimitedStream(environ['wsgi.input'], content_length)
+
+    return stream, form, cls(files)
+
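+# Illustrative usage sketch (not part of the module): inside a WSGI
+# application you would typically call
+#
+#     stream, form, files = parse_form_data(environ)
+#     username = form.get('username')
+#     upload = files.get('attachment')     # a FileStorage or None
+#
+# where ``username`` and ``attachment`` are hypothetical field names.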
+
+def _fix_ie_filename(filename):
+    """Internet Explorer 6 transmits the full file name if a file is
+    uploaded.  This function strips the full path if it thinks the
+    filename is a Windows-like absolute path.
+    """
+    if filename[1:3] == ':\\' or filename[:2] == '\\\\':
+        return filename.split('\\')[-1]
+    return filename
+
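+# For example (illustrative): _fix_ie_filename('C:\\docs\\report.txt')
+# returns 'report.txt', while a plain 'report.txt' is passed through
+# unchanged.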
+
+def _line_parse(line):
+    """Removes line ending characters and returns a tuple (`stripped_line`,
+    `is_terminated`).
+    """
+    if line[-2:] == '\r\n':
+        return line[:-2], True
+    elif line[-1:] in '\r\n':
+        return line[:-1], True
+    return line, False
+
+
+def _find_terminator(iterator):
+    """The terminator might have some additional newlines before it.
+    There is at least one application that sends additional newlines
+    before headers (the python setuptools package).
+    """
+    for line in iterator:
+        if not line:
+            break
+        line = line.strip()
+        if line:
+            return line
+    return ''
+
+
+def is_valid_multipart_boundary(boundary):
+    """Checks if the string given is a valid multipart boundary."""
+    return _multipart_boundary_re.match(boundary) is not None
+
+
+def parse_multipart(file, boundary, content_length, stream_factory=None,
+                    charset='utf-8', errors='ignore', buffer_size=10 * 1024,
+                    max_form_memory_size=None):
+    """Parse a multipart/form-data stream.  This is invoked by
+    :func:`utils.parse_form_data` if the content type matches.  Currently it
+    exists for internal usage only, but could be exposed as a separate
+    function if it turns out to be useful and if we consider the API stable.
+    """
+    # XXX: this function does not support multipart/mixed.  I don't know of
+    #      any browser that supports this, but it should be implemented
+    #      nonetheless.
+
+    # make sure the buffer size is divisible by four so that we can base64
+    # decode chunk by chunk
+    assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
+    # also the buffer size has to be at least 1024 bytes long or long headers
+    # will freak out the system
+    assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
+
+    if stream_factory is None:
+        stream_factory = default_stream_factory
+
+    if not boundary:
+        raise ValueError('Missing boundary')
+    if not is_valid_multipart_boundary(boundary):
+        raise ValueError('Invalid boundary: %s' % boundary)
+    if len(boundary) > buffer_size: # pragma: no cover
+        # this should never happen because we check for a minimum size
+        # of 1024 and boundaries may not be longer than 200.  The only
+        # situation when this can happen is for non-debug builds where
+        # the assert is skipped.
+        raise ValueError('Boundary longer than buffer size')
+
+    total_content_length = content_length
+    next_part = '--' + boundary
+    last_part = next_part + '--'
+
+    form = []
+    files = []
+    in_memory = 0
+
+    # convert the file into a limited stream with iteration capabilities
+    file = LimitedStream(file, content_length)
+    iterator = chain(make_line_iter(file, buffer_size=buffer_size),
+                     _empty_string_iter)
+
+    try:
+        terminator = _find_terminator(iterator)
+        if terminator != next_part:
+            raise ValueError('Expected boundary at start of multipart data')
+
+        while terminator != last_part:
+            headers = parse_multipart_headers(iterator)
+            disposition = headers.get('content-disposition')
+            if disposition is None:
+                raise ValueError('Missing Content-Disposition header')
+            disposition, extra = parse_options_header(disposition)
+            name = extra.get('name')
+
+            transfer_encoding = headers.get('content-transfer-encoding')
+            try_decode = transfer_encoding is not None and \
+                         transfer_encoding in _supported_multipart_encodings
+
+            filename = extra.get('filename')
+
+            # if no filename is given we stream the value into memory.  A
+            # list is used as a temporary container.
+            if filename is None:
+                is_file = False
+                container = []
+                _write = container.append
+                guard_memory = max_form_memory_size is not None
+
+            # otherwise we parse the rest of the headers and ask the stream
+            # factory for something we can write in.
+            else:
+                content_type = headers.get('content-type')
+                content_type = parse_options_header(content_type)[0] \
+                    or 'text/plain'
+                is_file = True
+                guard_memory = False
+                if filename is not None:
+                    filename = _fix_ie_filename(_decode_unicode(filename,
+                                                                charset,
+                                                                errors))
+                try:
+                    content_length = int(headers['content-length'])
+                except (KeyError, ValueError):
+                    content_length = 0
+                container = stream_factory(total_content_length, content_type,
+                                           filename, content_length)
+                _write = container.write
+
+            buf = ''
+            for line in iterator:
+                if not line:
+                    raise ValueError('unexpected end of stream')
+
+                if line[:2] == '--':
+                    terminator = line.rstrip()
+                    if terminator in (next_part, last_part):
+                        break
+
+                if try_decode:
+                    try:
+                        line = line.decode(transfer_encoding)
+                    except Exception:
+                        raise ValueError('could not decode transfer '
+                                         'encoded chunk')
+
+                # we have something in the buffer from the last iteration.
+                # this is usually a newline delimiter.
+                if buf:
+                    _write(buf)
+                    buf = ''
+
+                # If the line ends with a Windows CRLF we write everything
+                # except the last two bytes.  In all other cases we write
+                # everything except the last byte.  If it was a newline,
+                # that's fine, otherwise it does not matter because we will
+                # write it in the next iteration.  This ensures we do not
+                # write the final newline into the stream, so we do not
+                # have to truncate the stream afterwards.
+                if line[-2:] == '\r\n':
+                    buf = '\r\n'
+                    cutoff = -2
+                else:
+                    buf = line[-1]
+                    cutoff = -1
+                _write(line[:cutoff])
+
+                # if we write into memory and there is a memory size limit we
+                # count the number of bytes in memory and raise an exception if
+                # there is too much data in memory.
+                if guard_memory:
+                    in_memory += len(line)
+                    if in_memory > max_form_memory_size:
+                        from werkzeug.exceptions import RequestEntityTooLarge
+                        raise RequestEntityTooLarge()
+            else: # pragma: no cover
+                raise ValueError('unexpected end of part')
+
+            if is_file:
+                container.seek(0)
+                files.append((name, FileStorage(container, filename, name,
+                                                content_type,
+                                                content_length, headers)))
+            else:
+                form.append((name, _decode_unicode(''.join(container),
+                                                   charset, errors)))
+    finally:
+        # make sure the whole input stream is read
+        file.exhaust()
+
+    return form, files
+
+
+def parse_multipart_headers(iterable):
+    """Parses multipart headers from an iterable that yields lines (including
+    the trailing newline symbol).
+    """
+    result = []
+    for line in iterable:
+        line, line_terminated = _line_parse(line)
+        if not line_terminated:
+            raise ValueError('unexpected end of line in multipart header')
+        if not line:
+            break
+        elif line[0] in ' \t' and result:
+            key, value = result[-1]
+            result[-1] = (key, value + '\n ' + line[1:])
+        else:
+            parts = line.split(':', 1)
+            if len(parts) == 2:
+                result.append((parts[0].strip(), parts[1].strip()))
+
+    # we link the list to the headers, no need to create a copy, the
+    # list was not shared anyway.
+    return Headers.linked(result)
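+
+# Illustrative sketch of how the header parsing above is meant to behave,
+# assuming this module is importable as ``werkzeug.formparser``; the lines
+# and header names are made up for the example:
+#
+#     >>> from werkzeug.formparser import parse_multipart_headers
+#     >>> lines = ['Content-Type: text/plain\r\n', 'X-Custom: 42\r\n', '\r\n']
+#     >>> headers = parse_multipart_headers(iter(lines))
+#     >>> headers['Content-Type']
+#     'text/plain'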
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/http.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,578 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.http
+    ~~~~~~~~~~~~~
+
+    Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
+    HTTP data.  Most of the classes and functions provided by this module are
+    used by the wrappers, but they are useful on their own, too, especially if
+    the response and request objects are not used.
+
+    This covers some of the more HTTP-centric features of WSGI; some other
+    utilities, such as cookie handling, are documented in the `werkzeug.utils`
+    module.
+
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import inspect
+try:
+    from email.utils import parsedate_tz
+except ImportError: # pragma: no cover
+    from email.Utils import parsedate_tz
+from urllib2 import parse_http_list as _parse_list_header
+from datetime import datetime, timedelta
+try:
+    from hashlib import md5
+except ImportError: # pragma: no cover
+    from md5 import new as md5
+
+
+#: HTTP_STATUS_CODES is "exported" from this module.
+#: XXX: move to werkzeug.consts or something
+from werkzeug._internal import HTTP_STATUS_CODES
+
+
+_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
+_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                         '^_`abcdefghijklmnopqrstuvwxyz|~')
+_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
+_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
+_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
+_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*(?:=\s*([^;]+|%s))?\s*' %
+    (_quoted_string_re, _quoted_string_re))
+
+_entity_headers = frozenset([
+    'allow', 'content-encoding', 'content-language', 'content-length',
+    'content-location', 'content-md5', 'content-range', 'content-type',
+    'expires', 'last-modified'
+])
+_hop_by_hop_headers = frozenset([
+    'connection', 'keep-alive', 'proxy-authenticate',
+    'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
+    'upgrade'
+])
+
+
+def quote_header_value(value, extra_chars='', allow_token=True):
+    """Quote a header value if necessary.
+
+    .. versionadded:: 0.5
+
+    :param value: the value to quote.
+    :param extra_chars: a list of extra characters to skip quoting.
+    :param allow_token: if this is enabled token values are returned
+                        unchanged.
+    """
+    value = str(value)
+    if allow_token:
+        token_chars = _token_chars | set(extra_chars)
+        if set(value).issubset(token_chars):
+            return value
+    return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
+
+
+def unquote_header_value(value, is_filename=False):
+    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
+    This does not use the real unquoting but rather what browsers
+    actually do when quoting.
+
+    .. versionadded:: 0.5
+
+    :param value: the header value to unquote.
+    """
+    if value and value[0] == value[-1] == '"':
+        # this is not the real unquoting, but fixing this so that the
+        # RFC is met will result in bugs with internet explorer and
+        # probably some other browsers as well.  IE for example is
+        # uploading files with "C:\foo\bar.txt" as filename
+        value = value[1:-1]
+
+        # if this is a filename and the starting characters look like
+        # a UNC path, then just return the value without quotes.  Using the
+        # replace sequence below on a UNC path has the effect of turning
+        # the leading double slash into a single slash and then
+        # _fix_ie_filename() doesn't work correctly.  See #458.
+        if not is_filename or value[:2] != '\\\\':
+            return value.replace('\\\\', '\\').replace('\\"', '"')
+    return value
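+
+# A minimal round-trip sketch for the two helpers above (the values shown
+# are what the code in this module should produce):
+#
+#     >>> from werkzeug.http import quote_header_value, unquote_header_value
+#     >>> quote_header_value('token-value')
+#     'token-value'
+#     >>> quote_header_value('a value with spaces')
+#     '"a value with spaces"'
+#     >>> unquote_header_value('"a value with spaces"')
+#     'a value with spaces'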
+
+
+def dump_options_header(header, options):
+    """The reverse function to :func:`parse_options_header`.
+
+    :param header: the header to dump
+    :param options: a dict of options to append.
+    """
+    segments = []
+    if header is not None:
+        segments.append(header)
+    for key, value in options.iteritems():
+        if value is None:
+            segments.append(key)
+        else:
+            segments.append('%s=%s' % (key, quote_header_value(value)))
+    return '; '.join(segments)
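+
+# Sketch of what :func:`dump_options_header` should produce for a simple
+# content type with one option:
+#
+#     >>> from werkzeug.http import dump_options_header
+#     >>> dump_options_header('text/html', {'charset': 'utf-8'})
+#     'text/html; charset=utf-8'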
+
+
+def dump_header(iterable, allow_token=True):
+    """Dump an HTTP header again.  This is the reversal of
+    :func:`parse_list_header`, :func:`parse_set_header` and
+    :func:`parse_dict_header`.  This also quotes strings that include an
+    equals sign unless you pass it as a dict of key, value pairs.
+
+    >>> dump_header({'foo': 'bar baz'})
+    'foo="bar baz"'
+    >>> dump_header(('foo', 'bar baz'))
+    'foo, "bar baz"'
+
+    :param iterable: the iterable or dict of values to quote.
+    :param allow_token: if set to `False` tokens as values are disallowed.
+                        See :func:`quote_header_value` for more details.
+    """
+    if isinstance(iterable, dict):
+        items = []
+        for key, value in iterable.iteritems():
+            if value is None:
+                items.append(key)
+            else:
+                items.append('%s=%s' % (
+                    key,
+                    quote_header_value(value, allow_token=allow_token)
+                ))
+    else:
+        items = [quote_header_value(x, allow_token=allow_token)
+                 for x in iterable]
+    return ', '.join(items)
+
+
+def parse_list_header(value):
+    """Parse lists as described by RFC 2068 Section 2.
+
+    In particular, parse comma-separated lists where the elements of
+    the list may include quoted-strings.  A quoted-string could
+    contain a comma.  A non-quoted string could have quotes in the
+    middle.  Quotes are removed automatically after parsing.
+
+    It basically works like :func:`parse_set_header`, except that items
+    may appear multiple times and case sensitivity is preserved.
+
+    The return value is a standard :class:`list`:
+
+    >>> parse_list_header('token, "quoted value"')
+    ['token', 'quoted value']
+
+    To create a header from the :class:`list` again, use the
+    :func:`dump_header` function.
+
+    :param value: a string with a list header.
+    :return: :class:`list`
+    """
+    result = []
+    for item in _parse_list_header(value):
+        if item[:1] == item[-1:] == '"':
+            item = unquote_header_value(item[1:-1])
+        result.append(item)
+    return result
+
+
+def parse_dict_header(value):
+    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
+    convert them into a Python dict:
+
+    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
+    >>> type(d) is dict
+    True
+    >>> sorted(d.items())
+    [('bar', 'as well'), ('foo', 'is a fish')]
+
+    If there is no value for a key it will be `None`:
+
+    >>> parse_dict_header('key_without_value')
+    {'key_without_value': None}
+
+    To create a header from the :class:`dict` again, use the
+    :func:`dump_header` function.
+
+    :param value: a string with a dict header.
+    :return: :class:`dict`
+    """
+    result = {}
+    for item in _parse_list_header(value):
+        if '=' not in item:
+            result[item] = None
+            continue
+        name, value = item.split('=', 1)
+        if value[:1] == value[-1:] == '"':
+            value = unquote_header_value(value[1:-1])
+        result[name] = value
+    return result
+
+
+def parse_options_header(value):
+    """Parse a ``Content-Type`` like header into a tuple with the content
+    type and the options:
+
+    >>> parse_options_header('Content-Type: text/html; mimetype=text/html')
+    ('Content-Type: text/html', {'mimetype': 'text/html'})
+
+    This should not be used to parse ``Cache-Control`` like headers that use
+    a slightly different format.  For these headers use the
+    :func:`parse_dict_header` function.
+
+    .. versionadded:: 0.5
+
+    :param value: the header to parse.
+    :return: (str, options)
+    """
+    def _tokenize(string):
+        for match in _option_header_piece_re.finditer(string):
+            key, value = match.groups()
+            key = unquote_header_value(key)
+            if value is not None:
+                value = unquote_header_value(value, key == 'filename')
+            yield key, value
+
+    if not value:
+        return '', {}
+
+    parts = _tokenize(';' + value)
+    name = parts.next()[0]
+    extra = dict(parts)
+    return name, extra
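+
+# Sketch of the more common use of :func:`parse_options_header`: parsing a
+# ``Content-Disposition`` *value*.  ``sorted`` is only used to make the dict
+# output stable for the example:
+#
+#     >>> from werkzeug.http import parse_options_header
+#     >>> value, options = parse_options_header('form-data; name="field"; filename="a.txt"')
+#     >>> value
+#     'form-data'
+#     >>> sorted(options.items())
+#     [('filename', 'a.txt'), ('name', 'field')]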
+
+
+def parse_accept_header(value, cls=None):
+    """Parses an HTTP Accept-* header.  This does not implement a complete
+    valid algorithm but one that supports at least value and quality
+    extraction.
+
+    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
+    tuples sorted by the quality with some additional accessor methods).
+
+    The second parameter can be a subclass of :class:`Accept` that is created
+    with the parsed values and returned.
+
+    :param value: the accept header string to be parsed.
+    :param cls: the wrapper class for the return value (can be
+                :class:`Accept` or a subclass thereof)
+    :return: an instance of `cls`.
+    """
+    if cls is None:
+        cls = Accept
+
+    if not value:
+        return cls(None)
+
+    result = []
+    for match in _accept_re.finditer(value):
+        quality = match.group(2)
+        if not quality:
+            quality = 1
+        else:
+            quality = max(min(float(quality), 1), 0)
+        result.append((match.group(1), quality))
+    return cls(result)
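+
+# Illustrative use of :func:`parse_accept_header`; this assumes the
+# :class:`Accept` class from ``werkzeug.datastructures`` sorts by quality
+# and exposes list-style indexing and a ``best`` accessor:
+#
+#     >>> from werkzeug.http import parse_accept_header
+#     >>> accept = parse_accept_header('text/html,application/xml;q=0.9,*/*;q=0.8')
+#     >>> accept[0]
+#     ('text/html', 1)
+#     >>> accept.best
+#     'text/html'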
+
+
+def parse_cache_control_header(value, on_update=None, cls=None):
+    """Parse a cache control header.  The RFC differs between response and
+    request cache control, this method does not.  It's your responsibility
+    to not use the wrong control statements.
+
+    .. versionadded:: 0.5
+       The `cls` parameter was added.  If not specified, an immutable
+       :class:`RequestCacheControl` is returned.
+
+    :param value: a cache control header to be parsed.
+    :param on_update: an optional callable that is called every time a
+                      value on the :class:`CacheControl` object is changed.
+    :param cls: the class for the returned object.  By default
+                :class:`RequestCacheControl` is used.
+    :return: a `cls` object.
+    """
+    if cls is None:
+        cls = RequestCacheControl
+    if not value:
+        return cls(None, on_update)
+    return cls(parse_dict_header(value), on_update)
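+
+# Rough usage sketch; assumes :class:`RequestCacheControl` from
+# ``werkzeug.datastructures`` exposes the usual ``max_age`` attribute:
+#
+#     >>> from werkzeug.http import parse_cache_control_header
+#     >>> cc = parse_cache_control_header('max-age=3600')
+#     >>> cc.max_age
+#     3600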
+
+
+def parse_set_header(value, on_update=None):
+    """Parse a set-like header and return a :class:`HeaderSet` object:
+
+    >>> hs = parse_set_header('token, "quoted value"')
+
+    The return value is an object that treats the items case-insensitively
+    and keeps the order of the items:
+
+    >>> 'TOKEN' in hs
+    True
+    >>> hs.index('quoted value')
+    1
+    >>> hs
+    HeaderSet(['token', 'quoted value'])
+
+    To create a header from the :class:`HeaderSet` again, use the
+    :func:`dump_header` function.
+
+    :param value: a set header to be parsed.
+    :param on_update: an optional callable that is called every time a
+                      value on the :class:`HeaderSet` object is changed.
+    :return: a :class:`HeaderSet`
+    """
+    if not value:
+        return HeaderSet(None, on_update)
+    return HeaderSet(parse_list_header(value), on_update)
+
+
+def parse_authorization_header(value):
+    """Parse an HTTP basic/digest authorization header transmitted by the web
+    browser.  The return value is `None` if the header was invalid or not
+    given, otherwise an :class:`Authorization` object.
+
+    :param value: the authorization header to parse.
+    :return: an :class:`Authorization` object or `None`.
+    """
+    if not value:
+        return
+    try:
+        auth_type, auth_info = value.split(None, 1)
+        auth_type = auth_type.lower()
+    except ValueError:
+        return
+    if auth_type == 'basic':
+        try:
+            username, password = auth_info.decode('base64').split(':', 1)
+        except Exception, e:
+            return
+        return Authorization('basic', {'username': username,
+                                       'password': password})
+    elif auth_type == 'digest':
+        auth_map = parse_dict_header(auth_info)
+        for key in 'username', 'realm', 'nonce', 'uri', 'nc', 'cnonce', \
+                   'response':
+            if key not in auth_map:
+                return
+        return Authorization('digest', auth_map)
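+
+# Example for the basic-auth branch above; ``dXNlcjpzZWNyZXQ=`` is the base64
+# encoding of ``user:secret``, and the :class:`Authorization` object is
+# assumed to expose ``username``/``password`` attributes:
+#
+#     >>> from werkzeug.http import parse_authorization_header
+#     >>> auth = parse_authorization_header('Basic dXNlcjpzZWNyZXQ=')
+#     >>> auth.username, auth.password
+#     ('user', 'secret')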
+
+
+def parse_www_authenticate_header(value, on_update=None):
+    """Parse an HTTP WWW-Authenticate header into a :class:`WWWAuthenticate`
+    object.
+
+    :param value: a WWW-Authenticate header to parse.
+    :param on_update: an optional callable that is called every time a
+                      value on the :class:`WWWAuthenticate` object is changed.
+    :return: a :class:`WWWAuthenticate` object.
+    """
+    if not value:
+        return WWWAuthenticate(on_update=on_update)
+    try:
+        auth_type, auth_info = value.split(None, 1)
+        auth_type = auth_type.lower()
+    except (ValueError, AttributeError):
+        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
+    return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
+                           on_update)
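+
+# Sketch for a basic challenge; assumes :class:`WWWAuthenticate` exposes
+# ``type`` and ``realm`` accessors as in ``werkzeug.datastructures``:
+#
+#     >>> from werkzeug.http import parse_www_authenticate_header
+#     >>> wa = parse_www_authenticate_header('Basic realm="login required"')
+#     >>> wa.type
+#     'basic'
+#     >>> wa.realm
+#     'login required'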
+
+
+def quote_etag(etag, weak=False):
+    """Quote an etag.
+
+    :param etag: the etag to quote.
+    :param weak: set to `True` to tag it "weak".
+    """
+    if '"' in etag:
+        raise ValueError('invalid etag')
+    etag = '"%s"' % etag
+    if weak:
+        etag = 'w/' + etag
+    return etag
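+
+# Quick sketch of :func:`quote_etag` (the inverse of :func:`unquote_etag`
+# below):
+#
+#     >>> from werkzeug.http import quote_etag
+#     >>> quote_etag('bar')
+#     '"bar"'
+#     >>> quote_etag('bar', weak=True)
+#     'w/"bar"'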
+
+
+def unquote_etag(etag):
+    """Unquote a single etag:
+
+    >>> unquote_etag('w/"bar"')
+    ('bar', True)
+    >>> unquote_etag('"bar"')
+    ('bar', False)
+
+    :param etag: the etag identifier to unquote.
+    :return: a ``(etag, weak)`` tuple.
+    """
+    if not etag:
+        return None, None
+    etag = etag.strip()
+    weak = False
+    if etag[:2] in ('w/', 'W/'):
+        weak = True
+        etag = etag[2:]
+    if etag[:1] == etag[-1:] == '"':
+        etag = etag[1:-1]
+    return etag, weak
+
+
+def parse_etags(value):
+    """Parse an etag header.
+
+    :param value: the tag header to parse
+    :return: an :class:`ETags` object.
+    """
+    if not value:
+        return ETags()
+    strong = []
+    weak = []
+    end = len(value)
+    pos = 0
+    while pos < end:
+        match = _etag_re.match(value, pos)
+        if match is None:
+            break
+        is_weak, quoted, raw = match.groups()
+        if raw == '*':
+            return ETags(star_tag=True)
+        elif quoted:
+            raw = quoted
+        if is_weak:
+            weak.append(raw)
+        else:
+            strong.append(raw)
+        pos = match.end()
+    return ETags(strong, weak)
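+
+# Illustrative use of :func:`parse_etags`; assumes the :class:`ETags`
+# container provides ``contains`` and ``is_weak`` as in
+# ``werkzeug.datastructures``:
+#
+#     >>> from werkzeug.http import parse_etags
+#     >>> etags = parse_etags('"foo", w/"bar"')
+#     >>> etags.contains('foo')
+#     True
+#     >>> etags.is_weak('bar')
+#     True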
+
+
+def generate_etag(data):
+    """Generate an etag for some data."""
+    return md5(data).hexdigest()
+
+
+def parse_date(value):
+    """Parse one of the following date formats into a datetime object:
+
+    .. sourcecode:: text
+
+        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
+        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
+        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format
+
+    If parsing fails the return value is `None`.
+
+    :param value: a string with a supported date format.
+    :return: a :class:`datetime.datetime` object.
+    """
+    if value:
+        t = parsedate_tz(value.strip())
+        if t is not None:
+            try:
+                year = t[0]
+                # unfortunately that function does not tell us if two digit
+                # years were part of the string, or if they were prefixed
+                # with two zeroes.  So what we do is to assume that 69-99
+                # refer to 1969-1999, and everything below to 2000-2068.
+                if year >= 0 and year <= 68:
+                    year += 2000
+                elif year >= 69 and year <= 99:
+                    year += 1900
+                return datetime(*((year,) + t[1:7])) - \
+                       timedelta(seconds=t[-1] or 0)
+            except (ValueError, OverflowError):
+                return None
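+
+# Example of the RFC 822 format from the docstring above:
+#
+#     >>> from werkzeug.http import parse_date
+#     >>> parse_date('Sun, 06 Nov 1994 08:49:37 GMT')
+#     datetime.datetime(1994, 11, 6, 8, 49, 37)
+#     >>> parse_date('not a date') is None
+#     True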
+
+
+def is_resource_modified(environ, etag=None, data=None, last_modified=None):
+    """Convenience method for conditional requests.
+
+    :param environ: the WSGI environment of the request to be checked.
+    :param etag: the etag for the response for comparison.
+    :param data: or alternatively the data of the response to automatically
+                 generate an etag using :func:`generate_etag`.
+    :param last_modified: an optional date of the last modification.
+    :return: `True` if the resource was modified, otherwise `False`.
+    """
+    if etag is None and data is not None:
+        etag = generate_etag(data)
+    elif data is not None:
+        raise TypeError('both data and etag given')
+    if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
+        return False
+
+    unmodified = False
+    if isinstance(last_modified, basestring):
+        last_modified = parse_date(last_modified)
+    modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
+
+    if modified_since and last_modified and last_modified <= modified_since:
+        unmodified = True
+    if etag:
+        if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
+        if if_none_match:
+            unmodified = if_none_match.contains_raw(etag)
+
+    return not unmodified
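+
+# Minimal sketch with a hand-built WSGI environ; the etag values are made up
+# for the example and :class:`ETags` comparison is assumed to behave as in
+# ``werkzeug.datastructures``:
+#
+#     >>> from werkzeug.http import is_resource_modified
+#     >>> env = {'REQUEST_METHOD': 'GET', 'HTTP_IF_NONE_MATCH': '"abc"'}
+#     >>> is_resource_modified(env, etag='abc')
+#     False
+#     >>> is_resource_modified(env, etag='xyz')
+#     True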
+
+
+def remove_entity_headers(headers, allowed=('expires', 'content-location')):
+    """Remove all entity headers from a list or :class:`Headers` object.  This
+    operation works in-place.  `Expires` and `Content-Location` headers are
+    by default not removed.  The reason for this is :rfc:`2616` section
+    10.3.5, which specifies some entity headers that should be sent.
+
+    .. versionchanged:: 0.5
+       added `allowed` parameter.
+
+    :param headers: a list or :class:`Headers` object.
+    :param allowed: a list of headers that should still be allowed even though
+                    they are entity headers.
+    """
+    allowed = set(x.lower() for x in allowed)
+    headers[:] = [(key, value) for key, value in headers if
+                  not is_entity_header(key) or key.lower() in allowed]
+
+
+def remove_hop_by_hop_headers(headers):
+    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
+    :class:`Headers` object.  This operation works in-place.
+
+    .. versionadded:: 0.5
+
+    :param headers: a list or :class:`Headers` object.
+    """
+    headers[:] = [(key, value) for key, value in headers if
+                  not is_hop_by_hop_header(key)]
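+
+# In-place filtering sketch on a plain list of ``(key, value)`` pairs:
+#
+#     >>> from werkzeug.http import remove_hop_by_hop_headers
+#     >>> headers = [('Content-Type', 'text/html'), ('Connection', 'close')]
+#     >>> remove_hop_by_hop_headers(headers)
+#     >>> headers
+#     [('Content-Type', 'text/html')]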
+
+
+def is_entity_header(header):
+    """Check if a header is an entity header.
+
+    .. versionadded:: 0.5
+
+    :param header: the header to test.
+    :return: `True` if it's an entity header, `False` otherwise.
+    """
+    return header.lower() in _entity_headers
+
+
+def is_hop_by_hop_header(header):
+    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
+
+    .. versionadded:: 0.5
+
+    :param header: the header to test.
+    :return: `True` if it's a hop-by-hop header, `False` otherwise.
+    """
+    return header.lower() in _hop_by_hop_headers
+
+
+# circular dependency fun
+from werkzeug.datastructures import Headers, Accept, RequestCacheControl, \
+     ResponseCacheControl, HeaderSet, ETags, Authorization, \
+     WWWAuthenticate
+
+
+# DEPRECATED
+# backwards compatible imports
+from werkzeug.datastructures import MIMEAccept, CharsetAccept, LanguageAccept
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/local.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,405 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.local
+    ~~~~~~~~~~~~~~
+
+    This module implements context-local objects.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+try:
+    from greenlet import getcurrent as get_current_greenlet
+except ImportError: # pragma: no cover
+    try:
+        from py.magic import greenlet
+        get_current_greenlet = greenlet.getcurrent
+        del greenlet
+    except:
+        # catch all, py.* fails with so many different errors.
+        get_current_greenlet = int
+try:
+    from thread import get_ident as get_current_thread, allocate_lock
+except ImportError: # pragma: no cover
+    from dummy_thread import get_ident as get_current_thread, allocate_lock
+
+from werkzeug.wsgi import ClosingIterator
+from werkzeug._internal import _patch_wrapper
+
+
+# get the best ident function.  If greenlets are not installed we can
+# safely just use the builtin thread function and save a Python method
+# call and the cost of calculating a hash.
+if get_current_greenlet is int: # pragma: no cover
+    get_ident = get_current_thread
+else:
+    get_ident = lambda: (get_current_thread(), get_current_greenlet())
+
+
+def release_local(local):
+    """Releases the contents of the local for the current context.
+    This makes it possible to use locals without a manager.
+
+    Example::
+
+        >>> loc = Local()
+        >>> loc.foo = 42
+        >>> release_local(loc)
+        >>> hasattr(loc, 'foo')
+        False
+
+    With this function one can release :class:`Local` objects as well
+    as :class:`LocalStack` objects.  However, it is not possible to
+    release data held by proxies that way; one always has to retain
+    a reference to the underlying local object in order to be able
+    to release it.
+
+    .. versionadded:: 0.6.1
+    """
+    local.__release_local__()
+
+
+class Local(object):
+    __slots__ = ('__storage__', '__lock__')
+
+    def __init__(self):
+        object.__setattr__(self, '__storage__', {})
+        object.__setattr__(self, '__lock__', allocate_lock())
+
+    def __iter__(self):
+        return self.__storage__.iteritems()
+
+    def __call__(self, proxy):
+        """Create a proxy for a name."""
+        return LocalProxy(self, proxy)
+
+    def __release_local__(self):
+        self.__storage__.pop(get_ident(), None)
+
+    def __getattr__(self, name):
+        self.__lock__.acquire()
+        try:
+            try:
+                return self.__storage__[get_ident()][name]
+            except KeyError:
+                raise AttributeError(name)
+        finally:
+            self.__lock__.release()
+
+    def __setattr__(self, name, value):
+        self.__lock__.acquire()
+        try:
+            ident = get_ident()
+            storage = self.__storage__
+            if ident in storage:
+                storage[ident][name] = value
+            else:
+                storage[ident] = {name: value}
+        finally:
+            self.__lock__.release()
+
+    def __delattr__(self, name):
+        self.__lock__.acquire()
+        try:
+            try:
+                del self.__storage__[get_ident()][name]
+            except KeyError:
+                raise AttributeError(name)
+        finally:
+            self.__lock__.release()
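+
+# Minimal sketch of using a :class:`Local` directly; each thread (or
+# greenlet) sees only its own attributes, and the attribute name ``user``
+# is just an example:
+#
+#     >>> from werkzeug.local import Local, release_local
+#     >>> loc = Local()
+#     >>> loc.user = 'john'
+#     >>> loc.user
+#     'john'
+#     >>> release_local(loc)
+#     >>> hasattr(loc, 'user')
+#     False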
+
+
+class LocalStack(object):
+    """This class works similar to a :class:`Local` but keeps a stack
+    of objects instead.  This is best explained with an example::
+
+        >>> ls = LocalStack()
+        >>> ls.push(42)
+        >>> ls.top
+        42
+        >>> ls.push(23)
+        >>> ls.top
+        23
+        >>> ls.pop()
+        23
+        >>> ls.top
+        42
+
+    They can be force-released by using a :class:`LocalManager` or with
+    the :func:`release_local` function, but the correct way is to pop the
+    item from the stack after using it.  When the stack is empty it will
+    no longer be bound to the current context (and as such released).
+
+    By calling the stack without arguments it returns a proxy that resolves to
+    the topmost item on the stack.
+
+    .. versionadded:: 0.6.1
+    """
+
+    def __init__(self):
+        self._local = Local()
+        self._lock = allocate_lock()
+
+    def __release_local__(self):
+        self._local.__release_local__()
+
+    def __call__(self):
+        def _lookup():
+            rv = self.top
+            if rv is None:
+                raise RuntimeError('object unbound')
+            return rv
+        return LocalProxy(_lookup)
+
+    def push(self, obj):
+        """Pushes a new item to the stack"""
+        self._lock.acquire()
+        try:
+            rv = getattr(self._local, 'stack', None)
+            if rv is None:
+                self._local.stack = rv = []
+            rv.append(obj)
+            return rv
+        finally:
+            self._lock.release()
+
+    def pop(self):
+        """Removes the topmost item from the stack, will return the
+        old value or `None` if the stack was already empty.
+        """
+        self._lock.acquire()
+        try:
+            stack = getattr(self._local, 'stack', None)
+            if stack is None:
+                return None
+            elif len(stack) == 1:
+                release_local(self._local)
+                return stack[-1]
+            else:
+                return stack.pop()
+        finally:
+            self._lock.release()
+
+    @property
+    def top(self):
+        """The topmost item on the stack.  If the stack is empty,
+        `None` is returned.
+        """
+        try:
+            return self._local.stack[-1]
+        except (AttributeError, IndexError):
+            return None
+
+
+class LocalManager(object):
+    """Local objects cannot manage themselves. For that you need a local
+    manager.  You can pass a local manager multiple locals or add them later
+    by appending them to `manager.locals`.  Every time the manager cleans up,
+    it will clean up all the data left in the locals for this context.
+
+    .. versionchanged:: 0.6.1
+       Instead of a manager the :func:`release_local` function can be used
+       as well.
+    """
+
+    def __init__(self, locals=None):
+        if locals is None:
+            self.locals = []
+        elif isinstance(locals, Local):
+            self.locals = [locals]
+        else:
+            self.locals = list(locals)
+
+    def get_ident(self):
+        """Return the context identifier the local objects use internally for
+        this context.  You cannot override this method to change the behavior,
+        but you can use it to link other context-local objects (such as
+        SQLAlchemy's scoped sessions) to the Werkzeug locals.
+        """
+        return get_ident()
+
+    def cleanup(self):
+        """Manually clean up the data in the locals for this context.  Call
+        this at the end of the request or use `make_middleware()`.
+        """
+        ident = self.get_ident()
+        for local in self.locals:
+            release_local(local)
+
+    def make_middleware(self, app):
+        """Wrap a WSGI application so that cleaning up happens after
+        request end.
+        """
+        def application(environ, start_response):
+            return ClosingIterator(app(environ, start_response), self.cleanup)
+        return application
+
+    def middleware(self, func):
+        """Like `make_middleware` but for decorating functions.
+
+        Example usage::
+
+            @manager.middleware
+            def application(environ, start_response):
+                ...
+
+        The difference from `make_middleware` is that the returned function
+        has the name, docstring and module of the decorated function
+        copied over.
+        """
+        return _patch_wrapper(func, self.make_middleware(func))
+
+    def __repr__(self):
+        return '<%s storages: %d>' % (
+            self.__class__.__name__,
+            len(self.locals)
+        )
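+
+# Typical wiring sketch: one manager cleaning up a local after each request.
+# ``application`` here is a stand-in WSGI app written for the example:
+#
+#     from werkzeug.local import Local, LocalManager
+#
+#     local = Local()
+#     local_manager = LocalManager([local])
+#
+#     def application(environ, start_response):
+#         local.request_path = environ.get('PATH_INFO', '/')
+#         start_response('200 OK', [('Content-Type', 'text/plain')])
+#         return [local.request_path]
+#
+#     application = local_manager.make_middleware(application)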
+
+
+class LocalProxy(object):
+    """Acts as a proxy for a werkzeug local.  Forwards all operations to
+    a proxied object.  The only operations not supported for forwarding
+    are right-handed operands and any kind of assignment.
+
+    Example usage::
+
+        from werkzeug import Local
+        l = Local()
+
+        # these are proxies
+        request = l('request')
+        user = l('user')
+
+
+        from werkzeug import LocalStack
+        _response_local = LocalStack()
+
+        # this is a proxy
+        response = _response_local()
+
+    Whenever something is bound to l.user / l.request the proxy objects
+    will forward all operations.  If no object is bound a :exc:`RuntimeError`
+    will be raised.
+
+    To create proxies to :class:`Local` or :class:`LocalStack` objects,
+    call the object as shown above.  If you want to have a proxy to an
+    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
+    a function to the :class:`LocalProxy` constructor::
+
+        session = LocalProxy(lambda: get_current_request().session)
+
+    .. versionchanged:: 0.6.1
+       The class can be instantiated with a callable as well now.
+    """
+    __slots__ = ('__local', '__dict__', '__name__')
+
+    def __init__(self, local, name=None):
+        object.__setattr__(self, '_LocalProxy__local', local)
+        object.__setattr__(self, '__name__', name)
+
+    def _get_current_object(self):
+        """Return the current object.  This is useful if you want the real
+        object behind the proxy at a time for performance reasons or because
+        you want to pass the object into a different context.
+        """
+        if not hasattr(self.__local, '__release_local__'):
+            return self.__local()
+        try:
+            return getattr(self.__local, self.__name__)
+        except AttributeError:
+            raise RuntimeError('no object bound to %s' % self.__name__)
+
+    @property
+    def __dict__(self):
+        try:
+            return self._get_current_object().__dict__
+        except RuntimeError:
+            raise AttributeError('__dict__')
+
+    def __repr__(self):
+        try:
+            obj = self._get_current_object()
+        except RuntimeError:
+            return '<%s unbound>' % self.__class__.__name__
+        return repr(obj)
+
+    def __nonzero__(self):
+        try:
+            return bool(self._get_current_object())
+        except RuntimeError:
+            return False
+
+    def __unicode__(self):
+        try:
+            return unicode(self._get_current_object())
+        except RuntimeError:
+            return repr(self)
+
+    def __dir__(self):
+        try:
+            return dir(self._get_current_object())
+        except RuntimeError:
+            return []
+
+    def __getattr__(self, name):
+        if name == '__members__':
+            return dir(self._get_current_object())
+        return getattr(self._get_current_object(), name)
+
+    def __setitem__(self, key, value):
+        self._get_current_object()[key] = value
+
+    def __delitem__(self, key):
+        del self._get_current_object()[key]
+
+    def __setslice__(self, i, j, seq):
+        self._get_current_object()[i:j] = seq
+
+    def __delslice__(self, i, j):
+        del self._get_current_object()[i:j]
+
+    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
+    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
+    __str__ = lambda x: str(x._get_current_object())
+    __lt__ = lambda x, o: x._get_current_object() < o
+    __le__ = lambda x, o: x._get_current_object() <= o
+    __eq__ = lambda x, o: x._get_current_object() == o
+    __ne__ = lambda x, o: x._get_current_object() != o
+    __gt__ = lambda x, o: x._get_current_object() > o
+    __ge__ = lambda x, o: x._get_current_object() >= o
+    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)
+    __hash__ = lambda x: hash(x._get_current_object())
+    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
+    __len__ = lambda x: len(x._get_current_object())
+    __getitem__ = lambda x, i: x._get_current_object()[i]
+    __iter__ = lambda x: iter(x._get_current_object())
+    __contains__ = lambda x, i: i in x._get_current_object()
+    __getslice__ = lambda x, i, j: x._get_current_object()[i:j]
+    __add__ = lambda x, o: x._get_current_object() + o
+    __sub__ = lambda x, o: x._get_current_object() - o
+    __mul__ = lambda x, o: x._get_current_object() * o
+    __floordiv__ = lambda x, o: x._get_current_object() // o
+    __mod__ = lambda x, o: x._get_current_object() % o
+    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
+    __pow__ = lambda x, o: x._get_current_object() ** o
+    __lshift__ = lambda x, o: x._get_current_object() << o
+    __rshift__ = lambda x, o: x._get_current_object() >> o
+    __and__ = lambda x, o: x._get_current_object() & o
+    __xor__ = lambda x, o: x._get_current_object() ^ o
+    __or__ = lambda x, o: x._get_current_object() | o
+    __div__ = lambda x, o: x._get_current_object().__div__(o)
+    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
+    __neg__ = lambda x: -(x._get_current_object())
+    __pos__ = lambda x: +(x._get_current_object())
+    __abs__ = lambda x: abs(x._get_current_object())
+    __invert__ = lambda x: ~(x._get_current_object())
+    __complex__ = lambda x: complex(x._get_current_object())
+    __int__ = lambda x: int(x._get_current_object())
+    __long__ = lambda x: long(x._get_current_object())
+    __float__ = lambda x: float(x._get_current_object())
+    __oct__ = lambda x: oct(x._get_current_object())
+    __hex__ = lambda x: hex(x._get_current_object())
+    __index__ = lambda x: x._get_current_object().__index__()
+    __coerce__ = lambda x, o: x.__coerce__(x, o)
+    __enter__ = lambda x: x.__enter__()
+    __exit__ = lambda x, *a, **kw: x.__exit__(*a, **kw)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/posixemulation.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.posixemulation
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Provides a POSIX emulation for some features that are relevant to
+    web applications.  The main purpose is to simplify support for
+    systems such as Windows NT that are not 100% POSIX compatible.
+
+    Currently this only implements a :func:`rename` function that
+    follows POSIX semantics.  E.g. if the target file already exists it
+    will be replaced without asking.
+
+    This module was introduced in 0.6.1 and is not a public interface.
+    It might become one in later versions of Werkzeug.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+import os
+import errno
+import time
+import random
+
+
+can_rename_open_file = False
+if os.name == 'nt': # pragma: no cover
+    _rename = lambda src, dst: False
+    _rename_atomic = lambda src, dst: False
+
+    try:
+        import ctypes
+
+        _MOVEFILE_REPLACE_EXISTING = 0x1
+        _MOVEFILE_WRITE_THROUGH = 0x8
+        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW
+
+        def _rename(src, dst):
+            if not isinstance(src, unicode):
+                src = unicode(src, sys.getfilesystemencoding())
+            if not isinstance(dst, unicode):
+                dst = unicode(dst, sys.getfilesystemencoding())
+            if _rename_atomic(src, dst):
+                return True
+            retry = 0
+            rv = False
+            while not rv and retry < 100:
+                rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
+                                           _MOVEFILE_WRITE_THROUGH)
+                if not rv:
+                    time.sleep(0.001)
+                    retry += 1
+            return rv
+
+        # new in Vista and Windows Server 2008
+        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
+        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
+        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
+        _CloseHandle = ctypes.windll.kernel32.CloseHandle
+        can_rename_open_file = True
+
+        def _rename_atomic(src, dst):
+            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
+            if ta == -1:
+                return False
+            try:
+                retry = 0
+                rv = False
+                while not rv and retry < 100:
+                    rv = _MoveFileTransacted(src, dst, None, None,
+                                             _MOVEFILE_REPLACE_EXISTING |
+                                             _MOVEFILE_WRITE_THROUGH, ta)
+                    if rv:
+                        rv = _CommitTransaction(ta)
+                        break
+                    else:
+                        time.sleep(0.001)
+                        retry += 1
+                return rv
+            finally:
+                _CloseHandle(ta)
+    except Exception:
+        pass
+
+    def rename(src, dst):
+        # Try atomic or pseudo-atomic rename
+        if _rename(src, dst):
+            return
+        # Fall back to "move away and replace"
+        try:
+            os.rename(src, dst)
+        except OSError, e:
+            if e.errno != errno.EEXIST:
+                raise
+            old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
+            os.rename(dst, old)
+            os.rename(src, dst)
+            try:
+                os.unlink(old)
+            except Exception:
+                pass
+else:
+    rename = os.rename
+    can_rename_open_file = True
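+
+# Usage sketch for the POSIX-style rename defined above; the file names are
+# hypothetical and the target is silently overwritten:
+#
+#     >>> from werkzeug.posixemulation import rename
+#     >>> open('new.tmp', 'w').write('new contents')
+#     >>> open('old.tmp', 'w').write('old contents')
+#     >>> rename('new.tmp', 'old.tmp')
+#     >>> open('old.tmp').read()
+#     'new contents'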
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/routing.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,1430 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.routing
+    ~~~~~~~~~~~~~~~~
+
+    When it comes to combining multiple controller or view functions (however
+    you want to call them) you need a dispatcher.  A simple way would be
+    applying regular expression tests on the ``PATH_INFO`` and calling
+    registered callback functions that then return the value.
+
+    This module implements a much more powerful system than simple regular
+    expression matching because it can also convert values in the URLs and
+    build URLs.
+
+    Here is a simple example that creates a URL map for an application with
+    two subdomains (www and kb) and some URL rules:
+
+    >>> m = Map([
+    ...     # Static URLs
+    ...     Rule('/', endpoint='static/index'),
+    ...     Rule('/about', endpoint='static/about'),
+    ...     Rule('/help', endpoint='static/help'),
+    ...     # Knowledge Base
+    ...     Subdomain('kb', [
+    ...         Rule('/', endpoint='kb/index'),
+    ...         Rule('/browse/', endpoint='kb/browse'),
+    ...         Rule('/browse/<int:id>/', endpoint='kb/browse'),
+    ...         Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
+    ...     ])
+    ... ], default_subdomain='www')
+
+    If the application doesn't use subdomains it's perfectly fine to not set
+    the default subdomain and not use the `Subdomain` rule factory.  The endpoint
+    in the rules can be anything, for example import paths or unique
+    identifiers.  The WSGI application can use those endpoints to get the
+    handler for that URL.  It doesn't have to be a string at all but it's
+    recommended.
+
+    Now it's possible to create a URL adapter for one of the subdomains and
+    build URLs:
+
+    >>> c = m.bind('example.com')
+    >>> c.build("kb/browse", dict(id=42))
+    'http://kb.example.com/browse/42/'
+    >>> c.build("kb/browse", dict())
+    'http://kb.example.com/browse/'
+    >>> c.build("kb/browse", dict(id=42, page=3))
+    'http://kb.example.com/browse/42/3'
+    >>> c.build("static/about")
+    '/about'
+    >>> c.build("static/index", force_external=True)
+    'http://www.example.com/'
+
+    >>> c = m.bind('example.com', subdomain='kb')
+    >>> c.build("static/about")
+    'http://www.example.com/about'
+
+    The first argument to bind is the server name *without* the subdomain.
+    By default it will assume that the script is mounted on the root, but
+    often that's not the case, so you can provide the real mount point as
+    the second argument:
+
+    >>> c = m.bind('example.com', '/applications/example')
+
+    The third argument can be the subdomain; if not given, the default
+    subdomain is used.  For more details about binding have a look at the
+    documentation of the `MapAdapter`.
+
+    And here is how you can match URLs:
+
+    >>> c = m.bind('example.com')
+    >>> c.match("/")
+    ('static/index', {})
+    >>> c.match("/about")
+    ('static/about', {})
+    >>> c = m.bind('example.com', '/', 'kb')
+    >>> c.match("/")
+    ('kb/index', {})
+    >>> c.match("/browse/42/23")
+    ('kb/browse', {'id': 42, 'page': 23})
+
+    If matching fails you get a `NotFound` exception; if the rule thinks
+    it's a good idea to redirect (for example because the URL was defined
+    to have a slash at the end but the request was missing that slash) it
+    will raise a `RequestRedirect` exception.  Both are subclasses of the
+    `HTTPException` so you can use those errors as responses in the
+    application.
+
+    If matching succeeded but the URL rule was incompatible with the given
+    method (for example there were only rules for `GET` and `HEAD` and the
+    routing system tried to match a `POST` request) a `MethodNotAllowed`
+    exception is raised.
+
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+                             Thomas Johansson.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+from pprint import pformat
+from urlparse import urljoin
+from itertools import izip
+
+from werkzeug.urls import url_encode, url_quote
+from werkzeug.utils import redirect, format_string
+from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed
+from werkzeug._internal import _get_environ
+from werkzeug.datastructures import ImmutableDict, MultiDict
+
+
+_rule_re = re.compile(r'''
+    (?P<static>[^<]*)                           # static rule data
+    <
+    (?:
+        (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*)   # converter name
+        (?:\((?P<args>.*?)\))?                  # converter arguments
+        \:                                      # variable delimiter
+    )?
+    (?P<variable>[a-zA-Z][a-zA-Z0-9_]*)         # variable name
+    >
+''', re.VERBOSE)
+_simple_rule_re = re.compile(r'<([^>]+)>')
+
+
+def parse_rule(rule):
+    """Parse a rule and return it as generator. Each iteration yields tuples
+    in the form ``(converter, arguments, variable)``. If the converter is
+    `None` it's a static url part, otherwise it's a dynamic one.
+
+    :internal:
+    """
+    pos = 0
+    end = len(rule)
+    do_match = _rule_re.match
+    used_names = set()
+    while pos < end:
+        m = do_match(rule, pos)
+        if m is None:
+            break
+        data = m.groupdict()
+        if data['static']:
+            yield None, None, data['static']
+        variable = data['variable']
+        converter = data['converter'] or 'default'
+        if variable in used_names:
+            raise ValueError('variable name %r used twice.' % variable)
+        used_names.add(variable)
+        yield converter, data['args'] or None, variable
+        pos = m.end()
+    if pos < end:
+        remaining = rule[pos:]
+        if '>' in remaining or '<' in remaining:
+            raise ValueError('malformed url rule: %r' % rule)
+        yield None, None, remaining
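+
+# What the generator above yields for a simple rule (static part first, then
+# the converter/variable pair):
+#
+#     >>> from werkzeug.routing import parse_rule
+#     >>> list(parse_rule('/pages/<int:id>'))
+#     [(None, None, '/pages/'), ('int', None, 'id')]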
+
+
+def get_converter(map, name, args):
+    """Create a new converter for the given arguments or raise
+    exception if the converter does not exist.
+
+    :internal:
+    """
+    if name not in map.converters:
+        raise LookupError('the converter %r does not exist' % name)
+    if args:
+        storage = type('_Storage', (), {'__getitem__': lambda s, x: x})()
+        args, kwargs = eval(u'(lambda *a, **kw: (a, kw))(%s)' % args, {}, storage)
+    else:
+        args = ()
+        kwargs = {}
+    return map.converters[name](map, *args, **kwargs)
+
+
+class RoutingException(Exception):
+    """Special exceptions that require the application to redirect, notifying
+    about missing URLs, etc.
+
+    :internal:
+    """
+
+
+class RequestRedirect(HTTPException, RoutingException):
+    """Raise if the map requests a redirect. This is for example the case if
+    `strict_slashes` are activated and an url that requires a trailing slash.
+
+    The attribute `new_url` contains the absolute destination URL.
+    """
+    code = 301
+
+    def __init__(self, new_url):
+        RoutingException.__init__(self, new_url)
+        self.new_url = new_url
+
+    def get_response(self, environ):
+        return redirect(self.new_url, 301)
+
+
+class RequestSlash(RoutingException):
+    """Internal exception."""
+
+
+class BuildError(RoutingException, LookupError):
+    """Raised if the build system cannot find a URL for an endpoint with the
+    values provided.
+    """
+
+    def __init__(self, endpoint, values, method):
+        LookupError.__init__(self, endpoint, values, method)
+        self.endpoint = endpoint
+        self.values = values
+        self.method = method
+
+
+class ValidationError(ValueError):
+    """Validation error.  If a rule converter raises this exception the rule
+    does not match the current URL and the next rule is tried.
+    """
+
+
+class RuleFactory(object):
+    """As soon as you have more complex URL setups it's a good idea to use rule
+    factories to avoid repetitive tasks.  Some of them are builtin, others can
+    be added by subclassing `RuleFactory` and overriding `get_rules`.
+    """
+
+    def get_rules(self, map):
+        """Subclasses of `RuleFactory` have to override this method and return
+        an iterable of rules."""
+        raise NotImplementedError()
+
+
+class Subdomain(RuleFactory):
+    """All URLs provided by this factory have the subdomain set to a
+    specific subdomain.  For example if you want to use the subdomain for
+    the current language this can be a good setup::
+
+        url_map = Map([
+            Rule('/', endpoint='#select_language'),
+            Subdomain('<string(length=2):lang_code>', [
+                Rule('/', endpoint='index'),
+                Rule('/about', endpoint='about'),
+                Rule('/help', endpoint='help')
+            ])
+        ])
+
+    All the rules except for the ``'#select_language'`` endpoint will now
+    listen on a two-letter subdomain that holds the language code
+    for the current request.
+    """
+
+    def __init__(self, subdomain, rules):
+        self.subdomain = subdomain
+        self.rules = rules
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                rule = rule.empty()
+                rule.subdomain = self.subdomain
+                yield rule
+
+
+class Submount(RuleFactory):
+    """Like `Subdomain` but prefixes the URL rule with a given string::
+
+        url_map = Map([
+            Rule('/', endpoint='index'),
+            Submount('/blog', [
+                Rule('/', endpoint='blog/index'),
+                Rule('/entry/<entry_slug>', endpoint='blog/show')
+            ])
+        ])
+
+    Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
+    """
+
+    def __init__(self, path, rules):
+        self.path = path.rstrip('/')
+        self.rules = rules
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                rule = rule.empty()
+                rule.rule = self.path + rule.rule
+                yield rule
+
+
+class EndpointPrefix(RuleFactory):
+    """Prefixes all endpoints (which must be strings for this factory) with
+    another string. This can be useful for sub applications::
+
+        url_map = Map([
+            Rule('/', endpoint='index'),
+            EndpointPrefix('blog/', [Submount('/blog', [
+                Rule('/', endpoint='index'),
+                Rule('/entry/<entry_slug>', endpoint='show')
+            ])])
+        ])
+    """
+
+    def __init__(self, prefix, rules):
+        self.prefix = prefix
+        self.rules = rules
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                rule = rule.empty()
+                rule.endpoint = self.prefix + rule.endpoint
+                yield rule
+
+
+class RuleTemplate(object):
+    """Returns copies of the rules wrapped and expands string templates in
+    the endpoint, rule, defaults or subdomain sections.
+
+    Here is a small example of such a rule template::
+
+        from werkzeug.routing import Map, Rule, RuleTemplate
+
+        resource = RuleTemplate([
+            Rule('/$name/', endpoint='$name.list'),
+            Rule('/$name/<int:id>', endpoint='$name.show')
+        ])
+
+        url_map = Map([resource(name='user'), resource(name='page')])
+
+    When a rule template is called the keyword arguments are used to
+    replace the placeholders in all the string parameters.
+    """
+
+    def __init__(self, rules):
+        self.rules = list(rules)
+
+    def __call__(self, *args, **kwargs):
+        return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
+
+
+class RuleTemplateFactory(RuleFactory):
+    """A factory that fills in template variables into rules.  Used by
+    `RuleTemplate` internally.
+
+    :internal:
+    """
+
+    def __init__(self, rules, context):
+        self.rules = rules
+        self.context = context
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                new_defaults = subdomain = None
+                if rule.defaults is not None:
+                    new_defaults = {}
+                    for key, value in rule.defaults.iteritems():
+                        if isinstance(value, basestring):
+                            value = format_string(value, self.context)
+                        new_defaults[key] = value
+                if rule.subdomain is not None:
+                    subdomain = format_string(rule.subdomain, self.context)
+                new_endpoint = rule.endpoint
+                if isinstance(new_endpoint, basestring):
+                    new_endpoint = format_string(new_endpoint, self.context)
+                yield Rule(
+                    format_string(rule.rule, self.context),
+                    new_defaults,
+                    subdomain,
+                    rule.methods,
+                    rule.build_only,
+                    new_endpoint,
+                    rule.strict_slashes
+                )
+
+
+class Rule(RuleFactory):
+    """A Rule represents one URL pattern.  There are some options for `Rule`
+    that change the way it behaves and are passed to the `Rule` constructor.
+    Note that besides the rule-string all arguments *must* be keyword arguments
+    in order to not break the application on Werkzeug upgrades.
+
+    `string`
+        Rule strings basically are just normal URL paths with placeholders in
+        the format ``<converter(arguments):name>`` where the converter and the
+        arguments are optional.  If no converter is defined the `default`
+        converter is used which means `string` in the normal configuration.
+
+        URL rules that end with a slash are branch URLs, others are leaves.
+        If you have `strict_slashes` enabled (which is the default), all
+        branch URLs that are matched without a trailing slash will trigger a
+        redirect to the same URL with the missing slash appended.
+
+        The converters are defined on the `Map`.
+
+    `endpoint`
+        The endpoint for this rule. This can be anything. A reference to a
+        function, a string, a number etc.  The preferred way is using a string
+        because the endpoint is used for URL generation.
+
+    `defaults`
+        An optional dict with defaults for other rules with the same endpoint.
+        This is a bit tricky but useful if you want to have unique URLs::
+
+            url_map = Map([
+                Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
+                Rule('/all/page/<int:page>', endpoint='all_entries')
+            ])
+
+        If a user now visits ``http://example.com/all/page/1`` he will be
+        redirected to ``http://example.com/all/``.  If `redirect_defaults` is
+        disabled on the `Map` instance this will only affect the URL
+        generation.
+
+    `subdomain`
+        The subdomain rule string for this rule. If not specified the rule
+        only matches for the `default_subdomain` of the map.  If the map is
+        not bound to a subdomain this feature is disabled.
+
+        Can be useful if you want to have user profiles on different subdomains
+        and all subdomains are forwarded to your application::
+
+            url_map = Map([
+                Rule('/', subdomain='<username>', endpoint='user/homepage'),
+                Rule('/stats', subdomain='<username>', endpoint='user/stats')
+            ])
+
+    `methods`
+        A sequence of HTTP methods this rule applies to.  If not specified,
+        all methods are allowed.  For example, this can be useful if you want
+        different endpoints for `POST` and `GET`.  If methods are defined and the path
+        matches but the method matched against is not in this list or in the
+        list of another rule for that path the error raised is of the type
+        `MethodNotAllowed` rather than `NotFound`.  If `GET` is present in the
+        list of methods and `HEAD` is not, `HEAD` is added automatically.
+
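+        A sketch of such a rule set (the endpoint names are only illustrative,
+        not part of Werkzeug)::
+
+            url_map = Map([
+                Rule('/contact', methods=['GET'], endpoint='contact_form'),
+                Rule('/contact', methods=['POST'], endpoint='contact_submit')
+            ])
+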
+        .. versionchanged:: 0.6.1
+           `HEAD` is now automatically added to the methods if `GET` is
+           present.  The reason for this is that existing code often did not
+           work properly in servers not rewriting `HEAD` to `GET`
+           automatically and it was not documented how `HEAD` should be
+           treated.  This was considered a bug in Werkzeug because of that.
+
+    `strict_slashes`
+        Override the `Map` setting for `strict_slashes` only for this rule. If
+        not specified the `Map` setting is used.
+
+    `build_only`
+        Set this to True and the rule will never match but will create a URL
+        that can be built.  This is useful if you have resources on a subdomain
+        or folder that are not handled by the WSGI application (like static data).
+
+    `redirect_to`
+        If given this must be either a string or a callable.  In the case of
+        a callable it is called with the URL adapter that triggered the match
+        and the values of the URL as keyword arguments, and it has to return
+        the target of the redirect; otherwise it has to be a string with
+        placeholders in rule syntax::
+
+            def foo_with_slug(adapter, id):
+                # ask the database for the slug for the old id.  this of
+                # course has nothing to do with werkzeug.
+                return 'foo/' + Foo.get_slug_for_id(id)
+
+            url_map = Map([
+                Rule('/foo/<slug>', endpoint='foo'),
+                Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
+                Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
+            ])
+
+        When the rule is matched the routing system will raise a
+        `RequestRedirect` exception with the target for the redirect.
+
+        Keep in mind that the URL will be joined against the URL root of the
+        script so don't use a leading slash on the target URL unless you
+        really mean root of that domain.
+    """
+
+    def __init__(self, string, defaults=None, subdomain=None, methods=None,
+                 build_only=False, endpoint=None, strict_slashes=None,
+                 redirect_to=None):
+        if not string.startswith('/'):
+            raise ValueError('urls must start with a leading slash')
+        self.rule = string
+        self.is_leaf = not string.endswith('/')
+
+        self.map = None
+        self.strict_slashes = strict_slashes
+        self.subdomain = subdomain
+        self.defaults = defaults
+        self.build_only = build_only
+        if methods is None:
+            self.methods = None
+        else:
+            self.methods = set([x.upper() for x in methods])
+            if 'HEAD' not in self.methods and 'GET' in self.methods:
+                self.methods.add('HEAD')
+        self.endpoint = endpoint
+        self.greediness = 0
+        self.redirect_to = redirect_to
+
+        if defaults is not None:
+            self.arguments = set(map(str, defaults))
+        else:
+            self.arguments = set()
+        self._trace = self._converters = self._regex = self._weights = None
+
+    def empty(self):
+        """Return an unbound copy of this rule.  This can be useful if you
+        want to reuse an already bound URL for another map."""
+        defaults = None
+        if self.defaults is not None:
+            defaults = dict(self.defaults)
+        return Rule(self.rule, defaults, self.subdomain, self.methods,
+                    self.build_only, self.endpoint, self.strict_slashes,
+                    self.redirect_to)
+
+    def get_rules(self, map):
+        yield self
+
+    def refresh(self):
+        """Rebinds and refreshes the URL.  Call this if you modified the
+        rule in place.
+
+        :internal:
+        """
+        self.bind(self.map, rebind=True)
+
+    def bind(self, map, rebind=False):
+        """Bind the url to a map and create a regular expression based on
+        the information from the rule itself and the defaults from the map.
+
+        :internal:
+        """
+        if self.map is not None and not rebind:
+            raise RuntimeError('url rule %r already bound to map %r' %
+                               (self, self.map))
+        self.map = map
+        if self.strict_slashes is None:
+            self.strict_slashes = map.strict_slashes
+        if self.subdomain is None:
+            self.subdomain = map.default_subdomain
+
+        rule = self.subdomain + '|' + (self.is_leaf and self.rule
+                                       or self.rule.rstrip('/'))
+
+        self._trace = []
+        self._converters = {}
+        self._weights = []
+
+        regex_parts = []
+        for converter, arguments, variable in parse_rule(rule):
+            if converter is None:
+                regex_parts.append(re.escape(variable))
+                self._trace.append((False, variable))
+                self._weights.append(len(variable))
+            else:
+                convobj = get_converter(map, converter, arguments)
+                regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
+                self._converters[variable] = convobj
+                self._trace.append((True, variable))
+                self._weights.append(convobj.weight)
+                self.arguments.add(str(variable))
+                if convobj.is_greedy:
+                    self.greediness += 1
+        if not self.is_leaf:
+            self._trace.append((False, '/'))
+
+        if not self.build_only:
+            regex = r'^%s%s$' % (
+                u''.join(regex_parts),
+                (not self.is_leaf or not self.strict_slashes) and \
+                    '(?<!/)(?P<__suffix__>/?)' or ''
+            )
+            self._regex = re.compile(regex, re.UNICODE)
+
+    def match(self, path):
+        """Check if the rule matches a given path. Path is a string in the
+        form ``"subdomain|/path(method)"`` and is assembled by the map.
+
+        If the rule matches a dict with the converted values is returned,
+        otherwise the return value is `None`.
+
+        :internal:
+        """
+        if not self.build_only:
+            m = self._regex.search(path)
+            if m is not None:
+                groups = m.groupdict()
+                # we have a folder like part of the url without a trailing
+                # slash and strict slashes enabled. raise an exception that
+                # tells the map to redirect to the same url but with a
+                # trailing slash
+                if self.strict_slashes and not self.is_leaf and \
+                   not groups.pop('__suffix__'):
+                    raise RequestSlash()
+                # if we are not in strict slashes mode we have to remove
+                # a __suffix__
+                elif not self.strict_slashes:
+                    del groups['__suffix__']
+
+                result = {}
+                for name, value in groups.iteritems():
+                    try:
+                        value = self._converters[name].to_python(value)
+                    except ValidationError:
+                        return
+                    result[str(name)] = value
+                if self.defaults is not None:
+                    result.update(self.defaults)
+                return result
+
+    def build(self, values, append_unknown=True):
+        """Assembles the relative url for that rule and the subdomain.
+        If building doesn't work for some reason `None` is returned.
+
+        :internal:
+        """
+        tmp = []
+        add = tmp.append
+        processed = set(self.arguments)
+        for is_dynamic, data in self._trace:
+            if is_dynamic:
+                try:
+                    add(self._converters[data].to_url(values[data]))
+                except ValidationError:
+                    return
+                processed.add(data)
+            else:
+                add(data)
+        subdomain, url = (u''.join(tmp)).split('|', 1)
+
+        if append_unknown:
+            query_vars = MultiDict(values)
+            for key in processed:
+                if key in query_vars:
+                    del query_vars[key]
+
+            if query_vars:
+                url += '?' + url_encode(query_vars, self.map.charset,
+                                        sort=self.map.sort_parameters,
+                                        key=self.map.sort_key)
+
+        return subdomain, url
+
+    def provides_defaults_for(self, rule):
+        """Check if this rule has defaults for a given rule.
+
+        :internal:
+        """
+        return not self.build_only and self.defaults is not None and \
+               self.endpoint == rule.endpoint and self != rule and \
+               self.arguments == rule.arguments
+
+    def suitable_for(self, values, method=None):
+        """Check if the dict of values has enough data for url generation.
+
+        :internal:
+        """
+        if method is not None:
+            if self.methods is not None and method not in self.methods:
+                return False
+
+        valueset = set(values)
+
+        for key in self.arguments - set(self.defaults or ()):
+            if key not in values:
+                return False
+
+        if self.arguments.issubset(valueset):
+            if self.defaults is None:
+                return True
+            for key, value in self.defaults.iteritems():
+                if value != values[key]:
+                    return False
+
+        return True
+
+    def match_compare(self, other):
+        """Compare this object with another one for matching.
+
+        :internal:
+        """
+        for sw, ow in izip(self._weights, other._weights):
+            if sw > ow:
+                return -1
+            elif sw < ow:
+                return 1
+        if len(self._weights) > len(other._weights):
+            return -1
+        if len(self._weights) < len(other._weights):
+            return 1
+        if not other.arguments and self.arguments:
+            return 1
+        elif other.arguments and not self.arguments:
+            return -1
+        elif other.defaults is None and self.defaults is not None:
+            return 1
+        elif other.defaults is not None and self.defaults is None:
+            return -1
+        elif self.greediness > other.greediness:
+            return -1
+        elif self.greediness < other.greediness:
+            return 1
+        elif len(self.arguments) > len(other.arguments):
+            return 1
+        elif len(self.arguments) < len(other.arguments):
+            return -1
+        return 1
+
+    def build_compare(self, other):
+        """Compare this object with another one for building.
+
+        :internal:
+        """
+        if not other.arguments and self.arguments:
+            return -1
+        elif other.arguments and not self.arguments:
+            return 1
+        elif other.defaults is None and self.defaults is not None:
+            return -1
+        elif other.defaults is not None and self.defaults is None:
+            return 1
+        elif self.provides_defaults_for(other):
+            return -1
+        elif other.provides_defaults_for(self):
+            return 1
+        elif self.greediness > other.greediness:
+            return -1
+        elif self.greediness < other.greediness:
+            return 1
+        elif len(self.arguments) > len(other.arguments):
+            return -1
+        elif len(self.arguments) < len(other.arguments):
+            return 1
+        return -1
+
+    def __eq__(self, other):
+        return self.__class__ is other.__class__ and \
+               self._trace == other._trace
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __unicode__(self):
+        return self.rule
+
+    def __str__(self):
+        charset = self.map is not None and self.map.charset or 'utf-8'
+        return unicode(self).encode(charset)
+
+    def __repr__(self):
+        if self.map is None:
+            return '<%s (unbound)>' % self.__class__.__name__
+        charset = self.map is not None and self.map.charset or 'utf-8'
+        tmp = []
+        for is_dynamic, data in self._trace:
+            if is_dynamic:
+                tmp.append('<%s>' % data)
+            else:
+                tmp.append(data)
+        return '<%s %r%s -> %s>' % (
+            self.__class__.__name__,
+            (u''.join(tmp).encode(charset)).lstrip('|'),
+            self.methods is not None and ' (%s)' % \
+                ', '.join(self.methods) or '',
+            self.endpoint
+        )
+
+
+class BaseConverter(object):
+    """Base class for all converters."""
+    regex = '[^/]+'
+    is_greedy = False
+    weight = 100
+
+    def __init__(self, map):
+        self.map = map
+
+    def to_python(self, value):
+        return value
+
+    def to_url(self, value):
+        return url_quote(value, self.map.charset)
+
+
+class UnicodeConverter(BaseConverter):
+    """This converter is the default converter and accepts any string but
+    only one path segment.  Thus the string cannot include a slash.
+
+    This is the default validator.
+
+    Example::
+
+        Rule('/pages/<page>'),
+        Rule('/<string(length=2):lang_code>')
+
+    :param map: the :class:`Map`.
+    :param minlength: the minimum length of the string.  Must be greater
+                      than or equal to 1.
+    :param maxlength: the maximum length of the string.
+    :param length: the exact length of the string.
+    """
+
+    def __init__(self, map, minlength=1, maxlength=None, length=None):
+        BaseConverter.__init__(self, map)
+        if length is not None:
+            length = '{%d}' % int(length)
+        else:
+            if maxlength is None:
+                maxlength = ''
+            else:
+                maxlength = int(maxlength)
+            length = '{%s,%s}' % (
+                int(minlength),
+                maxlength
+            )
+        self.regex = '[^/]' + length
+
+
+class AnyConverter(BaseConverter):
+    """Matches one of the items provided.  Items can either be Python
+    identifiers or unicode strings::
+
+        Rule('/<any(about, help, imprint, u"class"):page_name>')
+
+    :param map: the :class:`Map`.
+    :param items: this function accepts the possible items as positional
+                  arguments.
+    """
+
+    def __init__(self, map, *items):
+        BaseConverter.__init__(self, map)
+        self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
+
+
+class PathConverter(BaseConverter):
+    """Like the default :class:`UnicodeConverter`, but it also matches
+    slashes.  This is useful for wikis and similar applications::
+
+        Rule('/<path:wikipage>')
+        Rule('/<path:wikipage>/edit')
+
+    :param map: the :class:`Map`.
+    """
+    regex = '[^/].*?'
+    is_greedy = True
+    weight = 50
+
+
+class NumberConverter(BaseConverter):
+    """Baseclass for `IntegerConverter` and `FloatConverter`.
+
+    :internal:
+    """
+
+    def __init__(self, map, fixed_digits=0, min=None, max=None):
+        BaseConverter.__init__(self, map)
+        self.fixed_digits = fixed_digits
+        self.min = min
+        self.max = max
+
+    def to_python(self, value):
+        if (self.fixed_digits and len(value) != self.fixed_digits):
+            raise ValidationError()
+        value = self.num_convert(value)
+        if (self.min is not None and value < self.min) or \
+           (self.max is not None and value > self.max):
+            raise ValidationError()
+        return value
+
+    def to_url(self, value):
+        value = self.num_convert(value)
+        if self.fixed_digits:
+            value = ('%%0%sd' % self.fixed_digits) % value
+        return str(value)
+
+
+class IntegerConverter(NumberConverter):
+    """This converter only accepts integer values::
+
+        Rule('/page/<int:page>')
+
+    This converter does not support negative values.
+
+    :param map: the :class:`Map`.
+    :param fixed_digits: the number of fixed digits in the URL.  If you set
+                         this to ``4`` for example, the application will
+                         only match if the url looks like ``/0001/``.  The
+                         default is variable length.
+    :param min: the minimal value.
+    :param max: the maximal value.
+    """
+    regex = r'\d+'
+    num_convert = int
+
+
+class FloatConverter(NumberConverter):
+    """This converter only accepts floating point values::
+
+        Rule('/probability/<float:probability>')
+
+    This converter does not support negative values.
+
+    :param map: the :class:`Map`.
+    :param min: the minimal value.
+    :param max: the maximal value.
+    """
+    regex = r'\d+\.\d+'
+    num_convert = float
+
+    def __init__(self, map, min=None, max=None):
+        NumberConverter.__init__(self, map, 0, min, max)
+
+
+#: the default converter mapping for the map.
+DEFAULT_CONVERTERS = {
+    'default':          UnicodeConverter,
+    'string':           UnicodeConverter,
+    'any':              AnyConverter,
+    'path':             PathConverter,
+    'int':              IntegerConverter,
+    'float':            FloatConverter
+}
+
+
+class Map(object):
+    """The map class stores all the URL rules and some configuration
+    parameters.  Some of the configuration values are only stored on the
+    `Map` instance since those affect all rules, others are just defaults
+    and can be overridden for each rule.  Note that you have to specify all
+    arguments besides the `rules` as keyword arguments!
+
+    :param rules: sequence of url rules for this map.
+    :param default_subdomain: The default subdomain for rules without a
+                              subdomain defined.
+    :param charset: charset of the url.  Defaults to ``"utf-8"``.
+    :param strict_slashes: Take care of trailing slashes.
+    :param redirect_defaults: This will redirect to the default rule if it
+                              wasn't visited that way.  This helps to create
+                              unique URLs.
+    :param converters: A dict of converters that adds additional converters
+                       to the list of converters. If you redefine one
+                       converter this will override the original one.
+    :param sort_parameters: If set to `True` the url parameters are sorted.
+                            See `url_encode` for more details.
+    :param sort_key: The sort key function for `url_encode`.
+
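+    A custom converter can be registered through the `converters` parameter.
+    A minimal sketch (``HexConverter`` and the converter name ``'hex'`` are
+    only illustrative, not part of Werkzeug)::
+
+        class HexConverter(BaseConverter):
+            regex = '[0-9a-f]+'
+
+            def to_python(self, value):
+                return int(value, 16)
+
+            def to_url(self, value):
+                return '%x' % value
+
+        url_map = Map([Rule('/color/<hex:code>', endpoint='color')],
+                      converters={'hex': HexConverter})
+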
+    .. versionadded:: 0.5
+        `sort_parameters` and `sort_key` were added.
+    """
+
+    #: .. versionadded:: 0.6
+    #:    a dict of default converters to be used.
+    default_converters = ImmutableDict(DEFAULT_CONVERTERS)
+
+    def __init__(self, rules=None, default_subdomain='', charset='utf-8',
+                 strict_slashes=True, redirect_defaults=True,
+                 converters=None, sort_parameters=False, sort_key=None):
+        self._rules = []
+        self._rules_by_endpoint = {}
+        self._remap = True
+
+        self.default_subdomain = default_subdomain
+        self.charset = charset
+        self.strict_slashes = strict_slashes
+        self.redirect_defaults = redirect_defaults
+
+        self.converters = self.default_converters.copy()
+        if converters:
+            self.converters.update(converters)
+
+        self.sort_parameters = sort_parameters
+        self.sort_key = sort_key
+
+        for rulefactory in rules or ():
+            self.add(rulefactory)
+
+    def is_endpoint_expecting(self, endpoint, *arguments):
+        """Iterate over all rules and check if the endpoint expects
+        the arguments provided.  This is for example useful if you have
+        some URLs that expect a language code and others that do not, and
+        you want to wrap the builder so that the current language code is
+        automatically added when it is not provided but the endpoint expects
+        it, as sketched below.
+
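+        A possible wrapper along those lines (``url_for``, ``url_map``,
+        ``adapter`` and ``current_language`` are illustrative names, not part
+        of Werkzeug)::
+
+            def url_for(endpoint, **values):
+                if 'lang_code' not in values and \
+                   url_map.is_endpoint_expecting(endpoint, 'lang_code'):
+                    values['lang_code'] = current_language
+                return adapter.build(endpoint, values)
+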
+        :param endpoint: the endpoint to check.
+        :param arguments: this function accepts one or more arguments
+                          as positional arguments.  Each one of them is
+                          checked.
+        """
+        self.update()
+        arguments = set(arguments)
+        for rule in self._rules_by_endpoint[endpoint]:
+            if arguments.issubset(rule.arguments):
+                return True
+        return False
+
+    def iter_rules(self, endpoint=None):
+        """Iterate over all rules or the rules of an endpoint.
+
+        :param endpoint: if provided only the rules for that endpoint
+                         are returned.
+        :return: an iterator
+        """
+        if endpoint is not None:
+            return iter(self._rules_by_endpoint[endpoint])
+        return iter(self._rules)
+
+    def add(self, rulefactory):
+        """Add a new rule or factory to the map and bind it.  Requires that the
+        rule is not bound to another map.
+
+        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
+        """
+        for rule in rulefactory.get_rules(self):
+            rule.bind(self)
+            self._rules.append(rule)
+            self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
+        self._remap = True
+
+    def bind(self, server_name, script_name=None, subdomain=None,
+             url_scheme='http', default_method='GET', path_info=None):
+        """Return a new :class:`MapAdapter` with the details specified to the
+        call.  Note that `script_name` will default to ``'/'`` if not further
+        specified or `None`.  At the very least `server_name` is required,
+        because the HTTP RFC requires absolute URLs for redirects and so all
+        redirect exceptions raised by Werkzeug will contain the full canonical
+        URL.
+
+        If no path_info is passed to :meth:`match` it will use the default path
+        info passed to bind.  While this doesn't really make sense for
+        manual bind calls, it's useful if you bind a map to a WSGI
+        environment which already contains the path info.
+
+        `subdomain` will default to the `default_subdomain` for this map if
+        not defined.  If there is no `default_subdomain` you cannot use the
+        subdomain feature.
+        """
+        if subdomain is None:
+            subdomain = self.default_subdomain
+        if script_name is None:
+            script_name = '/'
+        return MapAdapter(self, server_name, script_name, subdomain,
+                          url_scheme, path_info, default_method)
+
+    def bind_to_environ(self, environ, server_name=None, subdomain=None):
+        """Like :meth:`bind` but you can pass it an WSGI environment and it
+        will fetch the information from that dictionary.  Note that because of
+        limitations in the protocol there is no way to get the current
+        subdomain and real `server_name` from the environment.  If you don't
+        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
+        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
+        feature.
+
+        If `subdomain` is `None` but an environment and a server name are
+        provided it will calculate the current subdomain automatically.
+        Example: if `server_name` is ``'example.com'`` and the `SERVER_NAME`
+        in the WSGI `environ` is ``'staging.dev.example.com'``, the calculated
+        subdomain will be ``'staging.dev'``.
+
+        If the object passed as environ has an environ attribute, the value of
+        this attribute is used instead.  This allows you to pass request
+        objects.  Additionally `PATH_INFO` is added as a default of the
+        :class:`MapAdapter` so that you don't have to pass the path info to
+        the match method.
+
+        .. versionchanged:: 0.5
+            previously this method accepted a bogus `calculate_subdomain`
+            parameter that did not have any effect.  It was removed because
+            of that.
+
+        :param environ: a WSGI environment.
+        :param server_name: an optional server name hint (see above).
+        :param subdomain: optionally the current subdomain (see above).
+        """
+        environ = _get_environ(environ)
+        if server_name is None:
+            if 'HTTP_HOST' in environ:
+                server_name = environ['HTTP_HOST']
+            else:
+                server_name = environ['SERVER_NAME']
+                if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
+                   in (('https', '443'), ('http', '80')):
+                    server_name += ':' + environ['SERVER_PORT']
+        elif subdomain is None:
+            wsgi_server_name = environ.get('HTTP_HOST', environ['SERVER_NAME'])
+            cur_server_name = wsgi_server_name.split(':', 1)[0].split('.')
+            real_server_name = server_name.split(':', 1)[0].split('.')
+            offset = -len(real_server_name)
+            if cur_server_name[offset:] != real_server_name:
+                raise ValueError('the server name provided (%r) does not '
+                                 'match the server name from the WSGI '
+                                 'environment (%r)' %
+                                 (server_name, wsgi_server_name))
+            subdomain = '.'.join(filter(None, cur_server_name[:offset]))
+        return Map.bind(self, server_name, environ.get('SCRIPT_NAME'),
+                        subdomain, environ['wsgi.url_scheme'],
+                        environ['REQUEST_METHOD'], environ.get('PATH_INFO'))
+
+    def update(self):
+        """Called before matching and building to keep the compiled rules
+        in the correct order after things changed.
+        """
+        if self._remap:
+            self._rules.sort(lambda a, b: a.match_compare(b))
+            for rules in self._rules_by_endpoint.itervalues():
+                rules.sort(lambda a, b: a.build_compare(b))
+            self._remap = False
+
+    def __repr__(self):
+        rules = self.iter_rules()
+        return '%s([%s])' % (self.__class__.__name__, pformat(list(rules)))
+
+
+class MapAdapter(object):
+    """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
+    the URL matching and building based on runtime information.
+    """
+
+    def __init__(self, map, server_name, script_name, subdomain,
+                 url_scheme, path_info, default_method):
+        self.map = map
+        self.server_name = server_name
+        if not script_name.endswith('/'):
+            script_name += '/'
+        self.script_name = script_name
+        self.subdomain = subdomain
+        self.url_scheme = url_scheme
+        self.path_info = path_info or u''
+        self.default_method = default_method
+
+    def dispatch(self, view_func, path_info=None, method=None,
+                 catch_http_exceptions=False):
+        """Does the complete dispatching process.  `view_func` is called with
+        the endpoint and a dict with the values for the view.  It should
+        look up the view function, call it, and return a response object
+        or WSGI application.  HTTP exceptions are not caught by default
+        so that applications can display nicer error messages by just
+        catching them by hand.  If you want to stick with the default
+        error messages you can pass it ``catch_http_exceptions=True`` and
+        it will catch the HTTP exceptions.
+
+        Here is a small example of dispatch usage::
+
+            from werkzeug import Request, Response, responder
+            from werkzeug.routing import Map, Rule
+
+            def on_index(request):
+                return Response('Hello from the index')
+
+            url_map = Map([Rule('/', endpoint='index')])
+            views = {'index': on_index}
+
+            @responder
+            def application(environ, start_response):
+                request = Request(environ)
+                urls = url_map.bind_to_environ(environ)
+                return urls.dispatch(lambda e, v: views[e](request, **v),
+                                     catch_http_exceptions=True)
+
+        Keep in mind that this method might return exception objects, too, so
+        use :class:`Response.force_type` to get a response object.
+
+        :param view_func: a function that is called with the endpoint as
+                          first argument and the value dict as second.  Has
+                          to dispatch to the actual view function with this
+                          information.  (see above)
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
+        :param catch_http_exceptions: set to `True` to catch any of the
+                                      werkzeug :class:`HTTPException`\s.
+        """
+        try:
+            try:
+                endpoint, args = self.match(path_info, method)
+            except RequestRedirect, e:
+                return e
+            return view_func(endpoint, args)
+        except HTTPException, e:
+            if catch_http_exceptions:
+                return e
+            raise
+
+    def match(self, path_info=None, method=None, return_rule=False):
+        """The usage is simple: you just pass the match method the current
+        path info as well as the method (which defaults to `GET`).  The
+        following things can then happen:
+
+        - you receive a `NotFound` exception that indicates that no URL is
+          matching.  A `NotFound` exception is also a WSGI application you
+          can call to get a default "page not found" page (it happens to be
+          the same object as `werkzeug.exceptions.NotFound`)
+
+        - you receive a `MethodNotAllowed` exception that indicates that there
+          is a match for this URL but not for the current request method.
+          This is useful for RESTful applications.
+
+        - you receive a `RequestRedirect` exception with a `new_url`
+          attribute.  This exception is used to notify you about a redirect
+          Werkzeug requests from your WSGI application.  This is for example
+          the case if you request ``/foo`` although the correct URL is
+          ``/foo/``.  You can use the `RequestRedirect` instance as a
+          response-like object similar to all other subclasses of
+          `HTTPException`.
+
+        - you get a tuple in the form ``(endpoint, arguments)`` if there is
+          a match (unless `return_rule` is True, in which case you get a tuple
+          in the form ``(rule, arguments)``)
+
+        If the path info is not passed to the match method the default path
+        info of the map is used (defaults to the root URL if not defined
+        explicitly).
+
+        All of the exceptions raised are subclasses of `HTTPException` so they
+        can be used as WSGI responses.  They will all render generic error or
+        redirect pages.
+
+        Here is a small example for matching:
+
+        >>> m = Map([
+        ...     Rule('/', endpoint='index'),
+        ...     Rule('/downloads/', endpoint='downloads/index'),
+        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
+        ... ])
+        >>> urls = m.bind("example.com", "/")
+        >>> urls.match("/", "GET")
+        ('index', {})
+        >>> urls.match("/downloads/42")
+        ('downloads/show', {'id': 42})
+
+        And here is what happens on redirect and missing URLs:
+
+        >>> urls.match("/downloads")
+        Traceback (most recent call last):
+          ...
+        RequestRedirect: http://example.com/downloads/
+        >>> urls.match("/missing")
+        Traceback (most recent call last):
+          ...
+        NotFound: 404 Not Found
+
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
+        :param return_rule: return the rule that matched instead of just the
+                            endpoint (defaults to `False`).
+
+        .. versionadded:: 0.6
+            `return_rule` was added.
+        """
+        self.map.update()
+        if path_info is None:
+            path_info = self.path_info
+        if not isinstance(path_info, unicode):
+            path_info = path_info.decode(self.map.charset, 'ignore')
+        method = (method or self.default_method).upper()
+        path = u'%s|/%s' % (self.subdomain, path_info.lstrip('/'))
+        have_match_for = set()
+        for rule in self.map._rules:
+            try:
+                rv = rule.match(path)
+            except RequestSlash:
+                raise RequestRedirect(str('%s://%s%s%s/%s/' % (
+                    self.url_scheme,
+                    self.subdomain and self.subdomain + '.' or '',
+                    self.server_name,
+                    self.script_name[:-1],
+                    url_quote(path_info.lstrip('/'), self.map.charset)
+                )))
+            if rv is None:
+                continue
+            if rule.methods is not None and method not in rule.methods:
+                have_match_for.update(rule.methods)
+                continue
+            if self.map.redirect_defaults:
+                for r in self.map._rules_by_endpoint[rule.endpoint]:
+                    if r.provides_defaults_for(rule) and \
+                       r.suitable_for(rv, method):
+                        rv.update(r.defaults)
+                        subdomain, path = r.build(rv)
+                        raise RequestRedirect(str('%s://%s%s%s/%s' % (
+                            self.url_scheme,
+                            subdomain and subdomain + '.' or '',
+                            self.server_name,
+                            self.script_name[:-1],
+                            url_quote(path.lstrip('/'), self.map.charset)
+                        )))
+            if rule.redirect_to is not None:
+                if isinstance(rule.redirect_to, basestring):
+                    def _handle_match(match):
+                        value = rv[match.group(1)]
+                        return rule._converters[match.group(1)].to_url(value)
+                    redirect_url = _simple_rule_re.sub(_handle_match,
+                                                       rule.redirect_to)
+                else:
+                    redirect_url = rule.redirect_to(self, **rv)
+                raise RequestRedirect(str(urljoin('%s://%s%s%s' % (
+                    self.url_scheme,
+                    self.subdomain and self.subdomain + '.' or '',
+                    self.server_name,
+                    self.script_name
+                ), redirect_url)))
+            if return_rule:
+                return rule, rv
+            else:
+                return rule.endpoint, rv
+        if have_match_for:
+            raise MethodNotAllowed(valid_methods=list(have_match_for))
+        raise NotFound()
+
+    def test(self, path_info=None, method=None):
+        """Test if a rule would match.  Works like `match` but returns `True`
+        if the URL matches, or `False` if it does not exist.
+
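+        Continuing the bound map from the :meth:`match` example (the `urls`
+        adapter is assumed from there):
+
+        >>> urls.test("/downloads/42")
+        True
+        >>> urls.test("/missing")
+        False
+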
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
+        """
+        try:
+            self.match(path_info, method)
+        except RequestRedirect:
+            pass
+        except NotFound:
+            return False
+        return True
+
+    def _partial_build(self, endpoint, values, method, append_unknown):
+        """Helper for :meth:`build`.  Returns subdomain and path for the
+        rule that accepts this endpoint, values and method.
+
+        :internal:
+        """
+        # in case the method is none, try with the default method first
+        if method is None:
+            rv = self._partial_build(endpoint, values, self.default_method,
+                                     append_unknown)
+            if rv is not None:
+                return rv
+
+        # default method did not match or a specific method is passed,
+        # check all and go with first result.
+        for rule in self.map._rules_by_endpoint.get(endpoint, ()):
+            if rule.suitable_for(values, method):
+                rv = rule.build(values, append_unknown)
+                if rv is not None:
+                    return rv
+
+    def build(self, endpoint, values=None, method=None, force_external=False,
+              append_unknown=True):
+        """Building URLs works pretty much the other way round.  Instead of
+        `match` you call `build` and pass it the endpoint and a dict of
+        arguments for the placeholders.
+
+        The `build` function also accepts an argument called `force_external`
+        which, if set to `True`, will force external URLs.  By default,
+        external URLs (those that include the server name) will only be used
+        if the target URL is on a different subdomain.
+
+        >>> m = Map([
+        ...     Rule('/', endpoint='index'),
+        ...     Rule('/downloads/', endpoint='downloads/index'),
+        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
+        ... ])
+        >>> urls = m.bind("example.com", "/")
+        >>> urls.build("index", {})
+        '/'
+        >>> urls.build("downloads/show", {'id': 42})
+        '/downloads/42'
+        >>> urls.build("downloads/show", {'id': 42}, force_external=True)
+        'http://example.com/downloads/42'
+
+        Because URLs cannot contain non-ASCII data you will always get
+        bytestrings back.  Non-ASCII characters are urlencoded with the
+        charset defined on the map instance.
+
+        Additional values are converted to unicode and appended to the URL as
+        URL querystring parameters:
+
+        >>> urls.build("index", {'q': 'My Searchstring'})
+        '/?q=My+Searchstring'
+
+        If a rule does not exist when building, a `BuildError` exception is
+        raised.
+
+        The build method accepts an argument called `method` which allows you
+        to specify the method you want to have a URL built for if you have
+        different methods specified for the same endpoint.
+
+        .. versionadded:: 0.6
+           the `append_unknown` parameter was added.
+
+        :param endpoint: the endpoint of the URL to build.
+        :param values: the values for the URL to build.  Unhandled values are
+                       appended to the URL as query parameters.
+        :param method: the HTTP method for the rule if there are different
+                       URLs for different methods on the same endpoint.
+        :param force_external: enforce full canonical external URLs.
+        :param append_unknown: unknown parameters are appended to the generated
+                               URL as query string argument.  Disable this
+                               if you want the builder to ignore those.
+        """
+        self.map.update()
+        if values:
+            if isinstance(values, MultiDict):
+                values = dict((k, v) for k, v in values.iteritems(multi=True)
+                              if v is not None)
+            else:
+                values = dict((k, v) for k, v in values.iteritems()
+                              if v is not None)
+        else:
+            values = {}
+
+        rv = self._partial_build(endpoint, values, method, append_unknown)
+        if rv is None:
+            raise BuildError(endpoint, values, method)
+        subdomain, path = rv
+
+        if not force_external and subdomain == self.subdomain:
+            return str(urljoin(self.script_name, './' + path.lstrip('/')))
+        return str('%s://%s%s%s/%s' % (
+            self.url_scheme,
+            subdomain and subdomain + '.' or '',
+            self.server_name,
+            self.script_name[:-1],
+            path.lstrip('/')
+        ))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/script.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,303 @@
+# -*- coding: utf-8 -*-
+r'''
+    werkzeug.script
+    ~~~~~~~~~~~~~~~
+
+    Most of the time you have recurring tasks while writing an application
+    such as starting up an interactive python interpreter with some prefilled
+    imports, starting the development server, initializing the database or
+    something similar.
+
+    For that purpose werkzeug provides the `werkzeug.script` module which
+    helps you write such scripts.
+
+
+    Basic Usage
+    -----------
+
+    The following snippet is roughly the same in every werkzeug script::
+
+        #!/usr/bin/env python
+        # -*- coding: utf-8 -*-
+        from werkzeug import script
+
+        # actions go here
+
+        if __name__ == '__main__':
+            script.run()
+
+    Starting this script now does nothing because no actions are defined.
+    An action is a function in the same module starting with ``"action_"``
+    which takes a number of arguments where every argument has a default.  The
+    type of the default value specifies the type of the argument.
+
+    Arguments can then be passed by position or using ``--name=value`` from
+    the shell.
+
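+    A minimal sketch of such an action (the action name and its arguments
+    are only illustrative)::
+
+        def action_hello(name='world', repeat=1):
+            """Print a friendly greeting."""
+            for _ in xrange(repeat):
+                print 'Hello %s!' % name
+
+    Running ``./manage.py hello --name=Werkzeug --repeat=2`` would then call
+    ``action_hello(name='Werkzeug', repeat=2)``.
+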
+    Because a runserver and shell command is pretty common there are two
+    factory functions that create such commands::
+
+        def make_app():
+            from yourapplication import YourApplication
+            return YourApplication(...)
+
+        action_runserver = script.make_runserver(make_app, use_reloader=True)
+        action_shell = script.make_shell(lambda: {'app': make_app()})
+
+
+    Using The Scripts
+    -----------------
+
+    The script from above can be used like this from the shell now:
+
+    .. sourcecode:: text
+
+        $ ./manage.py --help
+        $ ./manage.py runserver localhost 8080 --debugger --no-reloader
+        $ ./manage.py runserver -p 4000
+        $ ./manage.py shell
+
+    As you can see it's possible to pass parameters as positional arguments
+    or as named parameters, pretty much like Python function calls.
+
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+'''
+import sys
+import inspect
+import getopt
+from os.path import basename
+
+
+argument_types = {
+    bool:       'boolean',
+    str:        'string',
+    int:        'integer',
+    float:      'float'
+}
+
+
+converters = {
+    'boolean':  lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
+    'string':   str,
+    'integer':  int,
+    'float':    float
+}
+
+
+def run(namespace=None, action_prefix='action_', args=None):
+    """Run the script.  Participating actions are looked up in the caller's
+    namespace if no namespace is given, otherwise in the dict provided.
+    Only items that start with action_prefix are processed as actions.  If
+    you want to use all items in the namespace provided as actions set
+    action_prefix to an empty string.
+
+    :param namespace: An optional dict where the functions are looked up in.
+                      By default the local namespace of the caller is used.
+    :param action_prefix: The prefix for the functions.  Everything else
+                          is ignored.
+    :param args: the arguments for the function.  If not specified
+                 :data:`sys.argv` without the first argument is used.
+    """
+    if namespace is None:
+        namespace = sys._getframe(1).f_locals
+    actions = find_actions(namespace, action_prefix)
+
+    if args is None:
+        args = sys.argv[1:]
+    if not args or args[0] in ('-h', '--help'):
+        return print_usage(actions)
+    elif args[0] not in actions:
+        fail('Unknown action \'%s\'' % args[0])
+
+    arguments = {}
+    types = {}
+    key_to_arg = {}
+    long_options = []
+    formatstring = ''
+    func, doc, arg_def = actions[args.pop(0)]
+    for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
+        real_arg = arg.replace('-', '_')
+        if shortcut:
+            formatstring += shortcut
+            if not isinstance(default, bool):
+                formatstring += ':'
+            key_to_arg['-' + shortcut] = real_arg
+        long_options.append(isinstance(default, bool) and arg or arg + '=')
+        key_to_arg['--' + arg] = real_arg
+        key_to_arg[idx] = real_arg
+        types[real_arg] = option_type
+        arguments[real_arg] = default
+
+    try:
+        optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
+    except getopt.GetoptError, e:
+        fail(str(e))
+
+    specified_arguments = set()
+    for key, value in enumerate(posargs):
+        try:
+            arg = key_to_arg[key]
+        except IndexError:
+            fail('Too many parameters')
+        specified_arguments.add(arg)
+        try:
+            arguments[arg] = converters[types[arg]](value)
+        except ValueError:
+            fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
+
+    for key, value in optlist:
+        arg = key_to_arg[key]
+        if arg in specified_arguments:
+            fail('Argument \'%s\' is specified twice' % arg)
+        if types[arg] == 'boolean':
+            if arg.startswith('no_'):
+                value = 'no'
+            else:
+                value = 'yes'
+        try:
+            arguments[arg] = converters[types[arg]](value)
+        except ValueError:
+            fail('Invalid value for \'%s\': %s' % (key, value))
+
+    newargs = {}
+    for k, v in arguments.iteritems():
+        newargs[k.startswith('no_') and k[3:] or k] = v
+    arguments = newargs
+    return func(**arguments)
+
+
+def fail(message, code=-1):
+    """Fail with an error."""
+    print >> sys.stderr, 'Error:', message
+    sys.exit(code)
+
+
+def find_actions(namespace, action_prefix):
+    """Find all the actions in the namespace."""
+    actions = {}
+    for key, value in namespace.iteritems():
+        if key.startswith(action_prefix):
+            actions[key[len(action_prefix):]] = analyse_action(value)
+    return actions
+
+
+def print_usage(actions):
+    """Print the usage information.  (Help screen)"""
+    actions = actions.items()
+    actions.sort()
+    print 'usage: %s <action> [<options>]' % basename(sys.argv[0])
+    print '       %s --help' % basename(sys.argv[0])
+    print
+    print 'actions:'
+    for name, (func, doc, arguments) in actions:
+        print '  %s:' % name
+        for line in doc.splitlines():
+            print '    %s' % line
+        if arguments:
+            print
+        for arg, shortcut, default, argtype in arguments:
+            if isinstance(default, bool):
+                print '    %s' % (
+                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
+                )
+            else:
+                print '    %-30s%-10s%s' % (
+                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
+                    argtype, default
+                )
+        print
+
+
+def analyse_action(func):
+    """Analyse a function."""
+    description = inspect.getdoc(func) or 'undocumented action'
+    arguments = []
+    args, varargs, kwargs, defaults = inspect.getargspec(func)
+    if varargs or kwargs:
+        raise TypeError('variable length arguments for action not allowed.')
+    if len(args) != len(defaults or ()):
+        raise TypeError('not all arguments have proper definitions')
+
+    for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
+        if arg.startswith('_'):
+            raise TypeError('arguments may not start with an underscore')
+        if not isinstance(definition, tuple):
+            shortcut = None
+            default = definition
+        else:
+            shortcut, default = definition
+        argument_type = argument_types[type(default)]
+        if isinstance(default, bool) and default is True:
+            arg = 'no-' + arg
+        arguments.append((arg.replace('_', '-'), shortcut,
+                          default, argument_type))
+    return func, description, arguments
+
+
+def make_shell(init_func=None, banner=None, use_ipython=True):
+    """Returns an action callback that spawns a new interactive
+    python shell.
+
+    :param init_func: an optional initialization function that is
+                      called before the shell is started.  The return
+                      value of this function is the initial namespace.
+    :param banner: the banner that is displayed before the shell.  If
+                   not specified a generic banner is used instead.
+    :param use_ipython: if set to `True` ipython is used if available.
+    """
+    if banner is None:
+        banner = 'Interactive Werkzeug Shell'
+    if init_func is None:
+        init_func = dict
+    def action(ipython=use_ipython):
+        """Start a new interactive python session."""
+        namespace = init_func()
+        if ipython:
+            try:
+                import IPython
+            except ImportError:
+                pass
+            else:
+                sh = IPython.Shell.IPShellEmbed(banner=banner)
+                sh(global_ns={}, local_ns=namespace)
+                return
+        from code import interact
+        interact(banner, local=namespace)
+    return action
+
+
+def make_runserver(app_factory, hostname='localhost', port=5000,
+                   use_reloader=False, use_debugger=False, use_evalex=True,
+                   threaded=False, processes=1, static_files=None,
+                   extra_files=None, ssl_context=None):
+    """Returns an action callback that spawns a new development server.
+
+    .. versionadded:: 0.5
+       `static_files` and `extra_files` were added.
+
+    .. versionadded:: 0.6.1
+       `ssl_context` was added.
+
+    :param app_factory: a function that returns a new WSGI application.
+    :param hostname: the default hostname the server should listen on.
+    :param port: the default port of the server.
+    :param use_reloader: the default setting for the reloader.
+    :param use_evalex: the default setting for the evalex flag of the debugger.
+    :param threaded: the default threading setting.
+    :param processes: the default number of processes to start.
+    :param static_files: optional dict of static files.
+    :param extra_files: optional list of extra files to track for reloading.
+    :param ssl_context: optional SSL context for running server in HTTPS mode.
+    """
+    def action(hostname=('h', hostname), port=('p', port),
+               reloader=use_reloader, debugger=use_debugger,
+               evalex=use_evalex, threaded=threaded, processes=processes):
+        """Start a new development server."""
+        from werkzeug.serving import run_simple
+        app = app_factory()
+        run_simple(hostname, port, app, reloader, debugger, evalex,
+                   extra_files, 1, threaded, processes,
+                   static_files=static_files, ssl_context=ssl_context)
+    return action
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/security.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.security
+    ~~~~~~~~~~~~~~~~~
+
+    Security related helpers such as secure password hashing tools.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import hmac
+import string
+from random import SystemRandom
+
+# because the API of hmac changed with the introduction of the
+# new hashlib module, we have to support both.  This sets up a
+# mapping to the digest factory functions and the digest modules
+# (or factory functions with changed API)
+try:
+    from hashlib import sha1, md5
+    _hash_funcs = _hash_mods = {'sha1': sha1, 'md5': md5}
+    _sha1_mod = sha1
+    _md5_mod = md5
+except ImportError:
+    import sha as _sha1_mod, md5 as _md5_mod
+    _hash_mods = {'sha1': _sha1_mod, 'md5': _md5_mod}
+    _hash_funcs = {'sha1': _sha1_mod.new, 'md5': _md5_mod.new}
+
+
+SALT_CHARS = string.letters + string.digits
+
+
+_sys_rng = SystemRandom()
+
+
+def gen_salt(length):
+    """Generate a random string of SALT_CHARS with specified ``length``."""
+    if length <= 0:
+        raise ValueError('requested salt of length <= 0')
+    return ''.join(_sys_rng.choice(SALT_CHARS) for _ in xrange(length))
+
+
+def _hash_internal(method, salt, password):
+    """Internal password hash helper.  Supports plaintext without salt,
+    unsalted and salted passwords.  When salted passwords are used,
+    hmac is used.
+    """
+    if method == 'plain':
+        return password
+    if salt:
+        if method not in _hash_mods:
+            return None
+        if isinstance(salt, unicode):
+            salt = salt.encode('utf-8')
+        h = hmac.new(salt, None, _hash_mods[method])
+    else:
+        if method not in _hash_funcs:
+            return None
+        h = _hash_funcs[method]()
+    if isinstance(password, unicode):
+        password = password.encode('utf-8')
+    h.update(password)
+    return h.hexdigest()
+
+
+def generate_password_hash(password, method='sha1', salt_length=8):
+    """Hash a password with the given method and salt with with a string of
+    the given length.  The format of the string returned includes the method
+    that was used so that :func:`check_password_hash` can check the hash.
+
+    The format for the hashed string looks like this::
+
+        method$salt$hash
+
+    This method can **not** generate unsalted passwords but it is possible
+    to set the method to plain to enforce plaintext passwords.  If a salt
+    is used, hmac is used internally to salt the password.
+
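+    A typical round trip with :func:`check_password_hash` (the actual salt
+    and digest differ on every call)::
+
+        >>> pwhash = generate_password_hash('default')
+        >>> check_password_hash(pwhash, 'default')
+        True
+        >>> check_password_hash(pwhash, 'wrong')
+        False
+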
+    :param password: the password to hash
+    :param method: the hash method to use (``'md5'`` or ``'sha1'``)
+    :param salt_length: the length of the salt in letters
+    """
+    salt = method != 'plain' and gen_salt(salt_length) or ''
+    h = _hash_internal(method, salt, password)
+    if h is None:
+        raise TypeError('invalid method %r' % method)
+    return '%s$%s$%s' % (method, salt, h)
+
+
+def check_password_hash(pwhash, password):
+    """check a password against a given salted and hashed password value.
+    In order to support unsalted legacy passwords this method supports
+    plain text passwords, md5 and sha1 hashes (both salted and unsalted).
+
+    Returns `True` if the password matched, `False` otherwise.
+
+    :param pwhash: a hashed string like returned by
+                   :func:`generate_password_hash`
+    :param password: the plaintext password to compare against the hash
+    """
+    if pwhash.count('$') < 2:
+        return False
+    method, salt, hashval = pwhash.split('$', 2)
+    return _hash_internal(method, salt, password) == hashval
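
A short sketch of how the two helpers above fit together (the literal
password is only illustrative)::

    from werkzeug.security import generate_password_hash, check_password_hash

    # produces a string of the form 'sha1$<salt>$<hexdigest>'
    pwhash = generate_password_hash('default', method='sha1')

    assert check_password_hash(pwhash, 'default')
    assert not check_password_hash(pwhash, 'wrong')
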
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/serving.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,533 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.serving
+    ~~~~~~~~~~~~~~~~
+
+    There are many ways to serve a WSGI application.  While you're developing
+    it you usually don't want a full blown webserver like Apache but a simple
+    standalone one.  From Python 2.5 onwards there is the `wsgiref`_ server in
+    the standard library.  If you're using older versions of Python you can
+    download the package from the cheeseshop.
+
+    However, there are some caveats.  Source code won't reload itself when
+    changed and each time you kill the server using ``^C`` you get a
+    `KeyboardInterrupt` error.  While the latter is easy to solve, the first
+    one can be a pain in the ass in some situations.
+
+    The easiest way is creating a small ``start-myproject.py`` that runs the
+    application::
+
+        #!/usr/bin/env python
+        # -*- coding: utf-8 -*-
+        from myproject import make_app
+        from werkzeug import run_simple
+
+        app = make_app(...)
+        run_simple('localhost', 8080, app, use_reloader=True)
+
+    You can also pass it an `extra_files` keyword argument with a list of
+    additional files (like configuration files) you want to observe.
+
+    For bigger applications you should consider using `werkzeug.script`
+    instead of a simple start file.
+
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import os
+import socket
+import sys
+import time
+import thread
+import subprocess
+from urllib import unquote
+from itertools import chain
+from SocketServer import ThreadingMixIn, ForkingMixIn
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+
+import werkzeug
+from werkzeug._internal import _log
+from werkzeug.exceptions import InternalServerError
+
+
+class WSGIRequestHandler(BaseHTTPRequestHandler, object):
+    """A request handler that implements WSGI dispatching."""
+
+    @property
+    def server_version(self):
+        return 'Werkzeug/' + werkzeug.__version__
+
+    def make_environ(self):
+        if '?' in self.path:
+            path_info, query = self.path.split('?', 1)
+        else:
+            path_info = self.path
+            query = ''
+        url_scheme = self.server.ssl_context is None and 'http' or 'https'
+        environ = {
+            'wsgi.version':         (1, 0),
+            'wsgi.url_scheme':      url_scheme,
+            'wsgi.input':           self.rfile,
+            'wsgi.errors':          sys.stderr,
+            'wsgi.multithread':     self.server.multithread,
+            'wsgi.multiprocess':    self.server.multiprocess,
+            'wsgi.run_once':        False,
+            'SERVER_SOFTWARE':      self.server_version,
+            'REQUEST_METHOD':       self.command,
+            'SCRIPT_NAME':          '',
+            'PATH_INFO':            unquote(path_info),
+            'QUERY_STRING':         query,
+            'CONTENT_TYPE':         self.headers.get('Content-Type', ''),
+            'CONTENT_LENGTH':       self.headers.get('Content-Length', ''),
+            'REMOTE_ADDR':          self.client_address[0],
+            'REMOTE_PORT':          self.client_address[1],
+            'SERVER_NAME':          self.server.server_address[0],
+            'SERVER_PORT':          str(self.server.server_address[1]),
+            'SERVER_PROTOCOL':      self.request_version
+        }
+
+        for key, value in self.headers.items():
+            key = 'HTTP_' + key.upper().replace('-', '_')
+            if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
+                environ[key] = value
+
+        return environ
+
+    def run_wsgi(self):
+        app = self.server.app
+        environ = self.make_environ()
+        headers_set = []
+        headers_sent = []
+
+        def write(data):
+            assert headers_set, 'write() before start_response'
+            if not headers_sent:
+                status, response_headers = headers_sent[:] = headers_set
+                code, msg = status.split(None, 1)
+                self.send_response(int(code), msg)
+                header_keys = set()
+                for key, value in response_headers:
+                    self.send_header(key, value)
+                    key = key.lower()
+                    header_keys.add(key)
+                if 'content-length' not in header_keys:
+                    self.close_connection = True
+                    self.send_header('Connection', 'close')
+                if 'server' not in header_keys:
+                    self.send_header('Server', self.version_string())
+                if 'date' not in header_keys:
+                    self.send_header('Date', self.date_time_string())
+                self.end_headers()
+
+            assert type(data) is str, 'applications must write bytes'
+            self.wfile.write(data)
+            self.wfile.flush()
+
+        def start_response(status, response_headers, exc_info=None):
+            if exc_info:
+                try:
+                    if headers_sent:
+                        raise exc_info[0], exc_info[1], exc_info[2]
+                finally:
+                    exc_info = None
+            elif headers_set:
+                raise AssertionError('Headers already set')
+            headers_set[:] = [status, response_headers]
+            return write
+
+        def execute(app):
+            application_iter = app(environ, start_response)
+            try:
+                for data in application_iter:
+                    write(data)
+                # make sure the headers are sent
+                if not headers_sent:
+                    write('')
+            finally:
+                if hasattr(application_iter, 'close'):
+                    application_iter.close()
+                application_iter = None
+
+        try:
+            execute(app)
+        except (socket.error, socket.timeout), e:
+            self.connection_dropped(e, environ)
+        except:
+            if self.server.passthrough_errors:
+                raise
+            from werkzeug.debug.tbtools import get_current_traceback
+            traceback = get_current_traceback(ignore_system_exceptions=True)
+            try:
+                # if we haven't yet sent the headers but they are set
+                # we roll back to be able to set them again.
+                if not headers_sent:
+                    del headers_set[:]
+                execute(InternalServerError())
+            except:
+                pass
+            self.server.log('error', 'Error on request:\n%s',
+                            traceback.plaintext)
+
+    def handle(self):
+        """Handles a request ignoring dropped connections."""
+        try:
+            return BaseHTTPRequestHandler.handle(self)
+        except (socket.error, socket.timeout), e:
+            self.connection_dropped(e)
+        except:
+            if self.server.ssl_context is None or not is_ssl_error():
+                raise
+
+    def connection_dropped(self, error, environ=None):
+        """Called if the connection was closed by the client.  By default
+        nothing happens.
+        """
+
+    def handle_one_request(self):
+        """Handle a single HTTP request."""
+        self.raw_requestline = self.rfile.readline()
+        if not self.raw_requestline:
+            self.close_connection = 1
+        elif self.parse_request():
+            return self.run_wsgi()
+
+    def send_response(self, code, message=None):
+        """Send the response header and log the response code."""
+        self.log_request(code)
+        if message is None:
+            message = code in self.responses and self.responses[code][0] or ''
+        if self.request_version != 'HTTP/0.9':
+            self.wfile.write("%s %d %s\r\n" %
+                             (self.protocol_version, code, message))
+
+    def version_string(self):
+        return BaseHTTPRequestHandler.version_string(self).strip()
+
+    def address_string(self):
+        return self.client_address[0]
+
+    def log_request(self, code='-', size='-'):
+        self.log('info', '"%s" %s %s', self.requestline, code, size)
+
+    def log_error(self, *args):
+        self.log('error', *args)
+
+    def log_message(self, format, *args):
+        self.log('info', format, *args)
+
+    def log(self, type, message, *args):
+        _log(type, '%s - - [%s] %s\n' % (self.address_string(),
+                                         self.log_date_time_string(),
+                                         message % args))
+
+
+#: backwards compatible name if someone is subclassing it
+BaseRequestHandler = WSGIRequestHandler
+
+
+def generate_adhoc_ssl_context():
+    """Generates an adhoc SSL context for the development server."""
+    from random import random
+    from OpenSSL import crypto, SSL
+
+    cert = crypto.X509()
+    cert.set_serial_number(int(random() * sys.maxint))
+    cert.gmtime_adj_notBefore(0)
+    cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
+
+    subject = cert.get_subject()
+    subject.CN = '*'
+    subject.O = 'Dummy Certificate'
+
+    issuer = cert.get_issuer()
+    issuer.CN = 'Untrusted Authority'
+    issuer.O = 'Self-Signed'
+
+    pkey = crypto.PKey()
+    pkey.generate_key(crypto.TYPE_RSA, 768)
+    cert.set_pubkey(pkey)
+    cert.sign(pkey, 'md5')
+
+    ctx = SSL.Context(SSL.SSLv23_METHOD)
+    ctx.use_privatekey(pkey)
+    ctx.use_certificate(cert)
+
+    return ctx
+
+
+def is_ssl_error(error=None):
+    """Checks if the given error (or the current one) is an SSL error."""
+    if error is None:
+        error = sys.exc_info()[1]
+    from OpenSSL import SSL
+    return isinstance(error, SSL.Error)
+
+
+class _SSLConnectionFix(object):
+    """Wrapper around SSL connection to provide a working makefile()."""
+
+    def __init__(self, con):
+        self._con = con
+
+    def makefile(self, mode, bufsize):
+        return socket._fileobject(self._con, mode, bufsize)
+
+    def __getattr__(self, attrib):
+        return getattr(self._con, attrib)
+
+
+def select_ip_version(host, port):
+    """Returns AF_INET4 or AF_INET6 depending on where to connect to."""
+    try:
+        info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+                                  socket.SOCK_STREAM, 0,
+                                  socket.AI_PASSIVE)
+        if info:
+            return info[0][0]
+    except socket.gaierror:
+        pass
+    if ':' in host and hasattr(socket, 'AF_INET6'):
+        return socket.AF_INET6
+    return socket.AF_INET
+
+
+class BaseWSGIServer(HTTPServer, object):
+    """Simple single-threaded, single-process WSGI server."""
+    multithread = False
+    multiprocess = False
+
+    def __init__(self, host, port, app, handler=None,
+                 passthrough_errors=False, ssl_context=None):
+        if handler is None:
+            handler = WSGIRequestHandler
+        self.address_family = select_ip_version(host, port)
+        HTTPServer.__init__(self, (host, int(port)), handler)
+        self.app = app
+        self.passthrough_errors = passthrough_errors
+
+        if ssl_context is not None:
+            try:
+                from OpenSSL import tsafe
+            except ImportError:
+                raise TypeError('SSL is not available if the OpenSSL '
+                                'library is not installed.')
+            if ssl_context == 'adhoc':
+                ssl_context = generate_adhoc_ssl_context()
+            self.socket = tsafe.Connection(ssl_context, self.socket)
+            self.ssl_context = ssl_context
+        else:
+            self.ssl_context = None
+
+    def log(self, type, message, *args):
+        _log(type, message, *args)
+
+    def serve_forever(self):
+        try:
+            HTTPServer.serve_forever(self)
+        except KeyboardInterrupt:
+            pass
+
+    def handle_error(self, request, client_address):
+        if self.passthrough_errors:
+            raise
+        else:
+            return HTTPServer.handle_error(self, request, client_address)
+
+    def get_request(self):
+        con, info = self.socket.accept()
+        if self.ssl_context is not None:
+            con = _SSLConnectionFix(con)
+        return con, info
+
+
+class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
+    """A WSGI server that does threading."""
+    multithread = True
+
+
+class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
+    """A WSGI server that does forking."""
+    multiprocess = True
+
+    def __init__(self, host, port, app, processes=40, handler=None,
+                 passthrough_errors=False, ssl_context=None):
+        BaseWSGIServer.__init__(self, host, port, app, handler,
+                                passthrough_errors, ssl_context)
+        self.max_children = processes
+
+
+def make_server(host, port, app=None, threaded=False, processes=1,
+                request_handler=None, passthrough_errors=False,
+                ssl_context=None):
+    """Create a new server instance that is either threaded, or forks
+    or just processes one request after another.
+    """
+    if threaded and processes > 1:
+        raise ValueError("cannot have a multithreaded and "
+                         "multi process server.")
+    elif threaded:
+        return ThreadedWSGIServer(host, port, app, request_handler,
+                                  passthrough_errors, ssl_context)
+    elif processes > 1:
+        return ForkingWSGIServer(host, port, app, processes, request_handler,
+                                 passthrough_errors, ssl_context)
+    else:
+        return BaseWSGIServer(host, port, app, request_handler,
+                              passthrough_errors, ssl_context)
+
+
+def reloader_loop(extra_files=None, interval=1):
+    """When this function is run from the main thread, it will force other
+    threads to exit when any modules currently loaded change.
+
+    Copyright notice.  This function is based on the autoreload.py from
+    the CherryPy trac, which originated from the now-defunct WSGIKit.
+
+    :param extra_files: a list of additional files it should watch.
+    """
+    def iter_module_files():
+        for module in sys.modules.values():
+            filename = getattr(module, '__file__', None)
+            if filename:
+                old = None
+                while not os.path.isfile(filename):
+                    old = filename
+                    filename = os.path.dirname(filename)
+                    if filename == old:
+                        break
+                else:
+                    if filename[-4:] in ('.pyc', '.pyo'):
+                        filename = filename[:-1]
+                    yield filename
+
+    mtimes = {}
+    while 1:
+        for filename in chain(iter_module_files(), extra_files or ()):
+            try:
+                mtime = os.stat(filename).st_mtime
+            except OSError:
+                continue
+
+            old_time = mtimes.get(filename)
+            if old_time is None:
+                mtimes[filename] = mtime
+                continue
+            elif mtime > old_time:
+                _log('info', ' * Detected change in %r, reloading' % filename)
+                sys.exit(3)
+        time.sleep(interval)
+
+
+def restart_with_reloader():
+    """Spawn a new Python interpreter with the same arguments as this one,
+    but running the reloader thread.
+    """
+    while 1:
+        _log('info', ' * Restarting with reloader...')
+        args = [sys.executable] + sys.argv
+        new_environ = os.environ.copy()
+        new_environ['WERKZEUG_RUN_MAIN'] = 'true'
+
+        # A weird bug on Windows: sometimes unicode strings end up in the
+        # environment and subprocess.call does not like this; encode them
+        # to latin1 and continue.
+        if os.name == 'nt':
+            for key, value in new_environ.iteritems():
+                if isinstance(value, unicode):
+                    new_environ[key] = value.encode('iso-8859-1')
+
+        exit_code = subprocess.call(args, env=new_environ)
+        if exit_code != 3:
+            return exit_code
+
+
+def run_with_reloader(main_func, extra_files=None, interval=1):
+    """Run the given function in an independent python interpreter."""
+    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
+        thread.start_new_thread(main_func, ())
+        try:
+            reloader_loop(extra_files, interval)
+        except KeyboardInterrupt:
+            return
+    try:
+        sys.exit(restart_with_reloader())
+    except KeyboardInterrupt:
+        pass
+
+
+def run_simple(hostname, port, application, use_reloader=False,
+               use_debugger=False, use_evalex=True,
+               extra_files=None, reloader_interval=1, threaded=False,
+               processes=1, request_handler=None, static_files=None,
+               passthrough_errors=False, ssl_context=None):
+    """Start an application using wsgiref and with an optional reloader.  This
+    wraps `wsgiref` to fix the wrong default reporting of the multithreaded
+    WSGI variable and adds optional multithreading and fork support.
+
+    .. versionadded:: 0.5
+       `static_files` was added to simplify serving of static files as well
+       as `passthrough_errors`.
+
+    .. versionadded:: 0.6
+       support for SSL was added.
+
+    :param hostname: The host for the application.  eg: ``'localhost'``
+    :param port: The port for the server.  eg: ``8080``
+    :param application: the WSGI application to execute
+    :param use_reloader: should the server automatically restart the python
+                         process if modules were changed?
+    :param use_debugger: should the werkzeug debugging system be used?
+    :param use_evalex: should the exception evaluation feature be enabled?
+    :param extra_files: a list of files the reloader should watch
+                        additionally to the modules.  For example configuration
+                        files.
+    :param reloader_interval: the interval for the reloader in seconds.
+    :param threaded: should the process handle each request in a separate
+                     thread?
+    :param processes: number of processes to spawn.
+    :param request_handler: optional parameter that can be used to replace
+                            the default one.  You can use this to replace it
+                            with a different
+                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
+                            subclass.
+    :param static_files: a dict of paths for static files.  This works exactly
+                         like :class:`SharedDataMiddleware`; it actually just
+                         wraps the application in that middleware before
+                         serving.
+    :param passthrough_errors: set this to `True` to disable the error catching.
+                               This means that the server will die on errors but
+                               it can be useful to hook debuggers in (pdb etc.)
+    :param ssl_context: an SSL context for the connection. Either an OpenSSL
+                        context, the string ``'adhoc'`` if the server should
+                        automatically create one, or `None` to disable SSL
+                        (which is the default).
+    """
+    if use_debugger:
+        from werkzeug.debug import DebuggedApplication
+        application = DebuggedApplication(application, use_evalex)
+    if static_files:
+        from werkzeug.wsgi import SharedDataMiddleware
+        application = SharedDataMiddleware(application, static_files)
+
+    def inner():
+        make_server(hostname, port, application, threaded,
+                    processes, request_handler,
+                    passthrough_errors, ssl_context).serve_forever()
+
+    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
+        display_hostname = hostname != '*' and hostname or 'localhost'
+        if ':' in display_hostname:
+            display_hostname = '[%s]' % display_hostname
+        _log('info', ' * Running on %s://%s:%d/', ssl_context is None
+             and 'http' or 'https', display_hostname, port)
+    if use_reloader:
+        # Create and destroy a socket so that any exceptions are raised before
+        # we spawn a separate Python interpreter and lose this ability.
+        test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        test_socket.bind((hostname, port))
+        test_socket.close()
+        run_with_reloader(inner, extra_files, reloader_interval)
+    else:
+        inner()
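
Tying the pieces above together, a plain WSGI callable can be served with the
reloader, the debugger and a static file mapping along these lines (the
application body and the static directory are placeholders)::

    from werkzeug.serving import run_simple

    def application(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello World!\n']

    if __name__ == '__main__':
        # use_reloader restarts the process on code changes, use_debugger
        # enables the interactive traceback, static_files maps URL prefixes
        # to directories ('/path/to/static' is a placeholder).
        run_simple('localhost', 5000, application,
                   use_reloader=True, use_debugger=True,
                   static_files={'/static': '/path/to/static'})
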
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/templates.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.templates
+    ~~~~~~~~~~~~~~~~~~
+
+    A minimal template engine.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD License.
+"""
+import sys
+import re
+import __builtin__ as builtins
+from compiler import ast, parse
+from compiler.pycodegen import ModuleCodeGenerator
+from tokenize import PseudoToken
+from werkzeug import utils, urls
+from werkzeug._internal import _decode_unicode
+from werkzeug.datastructures import MultiDict
+
+
+# Copyright notice: The `parse_data` method uses the string interpolation
+# algorithm by Ka-Ping Yee which originally was part of `Itpl20.py`_.
+#
+# .. _Itpl20.py: http://lfw.org/python/Itpl20.py
+
+
+token_re = re.compile('%s|%s(?s)' % (
+    r'[uU]?[rR]?("""|\'\'\')((?<!\\)\\\1|.)*?\1',
+    PseudoToken
+))
+directive_re = re.compile(r'(?<!\\)<%(?:(#)|(py(?:thon)?\b)|'
+                          r'(?:\s*(\w+))\s*)(.*?)\s*%>\n?(?s)')
+escape_re = re.compile(r'\\\n|\\(\\|<%)')
+namestart_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
+undefined = type('UndefinedType', (object,), {
+    '__iter__': lambda x: iter(()),
+    '__repr__': lambda x: 'Undefined',
+    '__str__':  lambda x: ''
+})()
+runtime_vars = frozenset(['Undefined', '__to_unicode', '__context',
+                          '__write', '__write_many'])
+
+
+def call_stmt(func, args, lineno):
+    return ast.CallFunc(ast.Name(func, lineno=lineno),
+                        args, lineno=lineno)
+
+
+def tokenize(source, filename):
+    escape = escape_re.sub
+    escape_repl = lambda m: m.group(1) or ''
+    lineno = 1
+    pos = 0
+
+    for match in directive_re.finditer(source):
+        start, end = match.span()
+        if start > pos:
+            data = source[pos:start]
+            yield lineno, 'data', escape(escape_repl, data)
+            lineno += data.count('\n')
+        is_comment, is_code, cmd, args = match.groups()
+        if is_code:
+            yield lineno, 'code', args
+        elif not is_comment:
+            yield lineno, 'cmd', (cmd, args)
+        lineno += source[start:end].count('\n')
+        pos = end
+
+    if pos < len(source):
+        yield lineno, 'data', escape(escape_repl, source[pos:])
+
+
+def transform(node, filename):
+    root = ast.Module(None, node, lineno=1)
+    nodes = [root]
+    while nodes:
+        node = nodes.pop()
+        node.filename = filename
+        if node.__class__ in (ast.Printnl, ast.Print):
+            node.dest = ast.Name('__context')
+        elif node.__class__ is ast.Const and isinstance(node.value, str):
+            try:
+                node.value.decode('ascii')
+            except UnicodeError:
+                node.value = node.value.decode('utf-8')
+        nodes.extend(node.getChildNodes())
+    return root
+
+
+class TemplateSyntaxError(SyntaxError):
+
+    def __init__(self, msg, filename, lineno):
+        from linecache import getline
+        l = getline(filename, lineno)
+        SyntaxError.__init__(self, msg, (filename, lineno, len(l) or 1, l))
+
+
+class Parser(object):
+
+    def __init__(self, gen, filename):
+        self.gen = gen
+        self.filename = filename
+        self.lineno = 1
+
+    def fail(self, msg):
+        raise TemplateSyntaxError(msg, self.filename, self.lineno)
+
+    def parse_python(self, expr, type='exec'):
+        if isinstance(expr, unicode):
+            expr = '\xef\xbb\xbf' + expr.encode('utf-8')
+        try:
+            node = parse(expr, type)
+        except SyntaxError, e:
+            raise TemplateSyntaxError(str(e), self.filename,
+                                      self.lineno + e.lineno - 1)
+        nodes = [node]
+        while nodes:
+            n = nodes.pop()
+            if hasattr(n, 'lineno'):
+                n.lineno = (n.lineno or 1) + self.lineno - 1
+            nodes.extend(n.getChildNodes())
+        return node.node
+
+    def parse(self, needle=()):
+        start_lineno = self.lineno
+        result = []
+        add = result.append
+        for self.lineno, token, value in self.gen:
+            if token == 'data':
+                add(self.parse_data(value))
+            elif token == 'code':
+                add(self.parse_code(value.splitlines()))
+            elif token == 'cmd':
+                name, args = value
+                if name in needle:
+                    return name, args, ast.Stmt(result, lineno=start_lineno)
+                if name in ('for', 'while'):
+                    add(self.parse_loop(args, name))
+                elif name == 'if':
+                    add(self.parse_if(args))
+                else:
+                    self.fail('unknown directive %s' % name)
+        if needle:
+            self.fail('unexpected end of template')
+        return ast.Stmt(result, lineno=start_lineno)
+
+    def parse_loop(self, args, type):
+        rv = self.parse_python('%s %s: pass' % (type, args), 'exec').nodes[0]
+        tag, value, rv.body = self.parse(('end' + type, 'else'))
+        if value:
+            self.fail('unexpected data after ' + tag)
+        if tag == 'else':
+            tag, value, rv.else_ = self.parse(('end' + type,))
+            if value:
+                self.fail('unexpected data after else')
+        return rv
+
+    def parse_if(self, args):
+        cond = self.parse_python('if %s: pass' % args).nodes[0]
+        tag, value, body = self.parse(('else', 'elif', 'endif'))
+        cond.tests[0] = (cond.tests[0][0], body)
+        while 1:
+            if tag == 'else':
+                if value:
+                    self.fail('unexpected data after else')
+                tag, value, cond.else_ = self.parse(('endif',))
+            elif tag == 'elif':
+                expr = self.parse_python(value, 'eval')
+                tag, value, body = self.parse(('else', 'elif', 'endif'))
+                cond.tests.append((expr, body))
+                continue
+            break
+        if value:
+            self.fail('unexpected data after endif')
+        return cond
+
+    def parse_code(self, lines):
+        margin = sys.maxint
+        for line in lines[1:]:
+            content = len(line.lstrip())
+            if content:
+                indent = len(line) - content
+                margin = min(margin, indent)
+        if lines:
+            lines[0] = lines[0].lstrip()
+        if margin < sys.maxint:
+            for i in xrange(1, len(lines)):
+                lines[i] = lines[i][margin:]
+        while lines and not lines[-1]:
+            lines.pop()
+        while lines and not lines[0]:
+            lines.pop(0)
+        return self.parse_python('\n'.join(lines))
+
+    def parse_data(self, text):
+        start_lineno = lineno = self.lineno
+        pos = 0
+        end = len(text)
+        nodes = []
+
+        def match_or_fail(pos):
+            match = token_re.match(text, pos)
+            if match is None:
+                self.fail('invalid syntax')
+            return match.group().strip(), match.end()
+
+        def write_expr(code):
+            node = self.parse_python(code, 'eval')
+            nodes.append(call_stmt('__to_unicode', [node], lineno))
+            return code.count('\n')
+
+        def write_data(value):
+            if value:
+                nodes.append(ast.Const(value, lineno=lineno))
+                return value.count('\n')
+            return 0
+
+        while 1:
+            offset = text.find('$', pos)
+            if offset < 0:
+                break
+            next = text[offset + 1]
+
+            if next == '{':
+                lineno += write_data(text[pos:offset])
+                pos = offset + 2
+                level = 1
+                while level:
+                    token, pos = match_or_fail(pos)
+                    if token in ('{', '}'):
+                        level += token == '{' and 1 or -1
+                lineno += write_expr(text[offset + 2:pos - 1])
+            elif next in namestart_chars:
+                lineno += write_data(text[pos:offset])
+                token, pos = match_or_fail(offset + 1)
+                while pos < end:
+                    if text[pos] == '.' and pos + 1 < end and \
+                       text[pos + 1] in namestart_chars:
+                        token, pos = match_or_fail(pos + 1)
+                    elif text[pos] in '([':
+                        pos += 1
+                        level = 1
+                        while level:
+                            token, pos = match_or_fail(pos)
+                            if token in ('(', ')', '[', ']'):
+                                level += token in '([' and 1 or -1
+                    else:
+                        break
+                lineno += write_expr(text[offset + 1:pos])
+            else:
+                lineno += write_data(text[pos:offset + 1])
+                pos = offset + 1 + (next == '$')
+        write_data(text[pos:])
+
+        return ast.Discard(call_stmt(len(nodes) == 1 and '__write' or
+                           '__write_many', nodes, start_lineno),
+                           lineno=start_lineno)
+
+
+class Context(object):
+
+    def __init__(self, namespace, charset, errors):
+        self.charset = charset
+        self.errors = errors
+        self._namespace = namespace
+        self._buffer = []
+        self._write = self._buffer.append
+        _extend = self._buffer.extend
+        self.runtime = dict(
+            Undefined=undefined,
+            __to_unicode=self.to_unicode,
+            __context=self,
+            __write=self._write,
+            __write_many=lambda *a: _extend(a)
+        )
+
+    def write(self, value):
+        self._write(self.to_unicode(value))
+
+    def to_unicode(self, value):
+        if isinstance(value, str):
+            return _decode_unicode(value, self.charset, self.errors)
+        return unicode(value)
+
+    def get_value(self, as_unicode=True):
+        rv = u''.join(self._buffer)
+        if not as_unicode:
+            return rv.encode(self.charset, self.errors)
+        return rv
+
+    def __getitem__(self, key, default=undefined):
+        try:
+            return self._namespace[key]
+        except KeyError:
+            return getattr(builtins, key, default)
+
+    def get(self, key, default=None):
+        return self.__getitem__(key, default)
+
+    def __setitem__(self, key, value):
+        self._namespace[key] = value
+
+    def __delitem__(self, key):
+        del self._namespace[key]
+
+
+class TemplateCodeGenerator(ModuleCodeGenerator):
+
+    def __init__(self, node, filename):
+        ModuleCodeGenerator.__init__(self, transform(node, filename))
+
+    def _nameOp(self, prefix, name):
+        if name in runtime_vars:
+            return self.emit(prefix + '_GLOBAL', name)
+        return ModuleCodeGenerator._nameOp(self, prefix, name)
+
+
+class Template(object):
+    """Represents a simple text based template.  It's a good idea to load such
+    templates from files on the file system to get better debug output.
+    """
+
+    default_context = {
+        'escape':           utils.escape,
+        'url_quote':        urls.url_quote,
+        'url_quote_plus':   urls.url_quote_plus,
+        'url_encode':       urls.url_encode
+    }
+
+    def __init__(self, source, filename='<template>', charset='utf-8',
+                 errors='strict', unicode_mode=True):
+        if isinstance(source, str):
+            source = _decode_unicode(source, charset, errors)
+        if isinstance(filename, unicode):
+            filename = filename.encode('utf-8')
+        node = Parser(tokenize(u'\n'.join(source.splitlines()),
+                               filename), filename).parse()
+        self.code = TemplateCodeGenerator(node, filename).getCode()
+        self.filename = filename
+        self.charset = charset
+        self.errors = errors
+        self.unicode_mode = unicode_mode
+
+    @classmethod
+    def from_file(cls, file, charset='utf-8', errors='strict',
+                  unicode_mode=True):
+        """Load a template from a file.
+
+        .. versionchanged:: 0.5
+            The encoding parameter was renamed to charset.
+
+        :param file: a filename or file object to load the template from.
+        :param charset: the charset of the template to load.
+        :param errors: the error behavior of the charset decoding.
+        :param unicode_mode: set to `False` to disable unicode mode.
+        :return: a template
+        """
+        close = False
+        f = file
+        if isinstance(file, basestring):
+            f = open(file, 'r')
+            close = True
+        try:
+            data = _decode_unicode(f.read(), charset, errors)
+        finally:
+            if close:
+                f.close()
+        return cls(data, getattr(f, 'name', '<template>'), charset,
+                   errors, unicode_mode)
+
+    def render(self, *args, **kwargs):
+        """This function accepts either a dict or some keyword arguments which
+        will then be the context the template is evaluated in.  The return
+        value will be the rendered template.
+
+        :param context: the function accepts the same arguments as the
+                        :class:`dict` constructor.
+        :return: the rendered template as string
+        """
+        ns = self.default_context.copy()
+        if len(args) == 1 and isinstance(args[0], MultiDict):
+            ns.update(args[0].to_dict(flat=True))
+        else:
+            ns.update(dict(*args))
+        if kwargs:
+            ns.update(kwargs)
+        context = Context(ns, self.charset, self.errors)
+        exec self.code in context.runtime, context
+        return context.get_value(self.unicode_mode)
+
+    def substitute(self, *args, **kwargs):
+        """For API compatibility with `string.Template`."""
+        return self.render(*args, **kwargs)
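
The directive and expression syntax handled by the parser above can be
exercised with a minimal sketch like this (the values are illustrative)::

    from werkzeug.templates import Template

    tmpl = Template(u'Hello ${escape(name)}! <% if admin %>(admin)<% endif %>')

    # `escape` comes from Template.default_context; keyword arguments
    # passed to render() become the template namespace.
    print tmpl.render(name=u'<World>', admin=True)
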
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/test.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,802 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.test
+    ~~~~~~~~~~~~~
+
+    This module implements a client to WSGI applications for testing.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+import urlparse
+import mimetypes
+from time import time
+from random import random
+from itertools import chain
+from tempfile import TemporaryFile
+from cStringIO import StringIO
+from cookielib import CookieJar
+from urllib2 import Request as U2Request
+
+from werkzeug._internal import _empty_stream, _get_environ
+from werkzeug.wrappers import BaseRequest
+from werkzeug.urls import url_encode, url_fix, iri_to_uri, _unquote
+from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
+from werkzeug.datastructures import FileMultiDict, MultiDict, \
+     CombinedMultiDict, Headers, FileStorage
+
+
+def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
+                            boundary=None, charset='utf-8'):
+    """Encode a dict of values (either strings or file descriptors or
+    :class:`FileStorage` objects.) into a multipart encoded string stored
+    in a file descriptor.
+    """
+    if boundary is None:
+        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
+    _closure = [StringIO(), 0, False]
+
+    if use_tempfile:
+        def write(string):
+            stream, total_length, on_disk = _closure
+            if on_disk:
+                stream.write(string)
+            else:
+                length = len(string)
+                if length + _closure[1] <= threshold:
+                    stream.write(string)
+                else:
+                    new_stream = TemporaryFile('wb+')
+                    new_stream.write(stream.getvalue())
+                    new_stream.write(string)
+                    _closure[0] = new_stream
+                    _closure[2] = True
+                _closure[1] = total_length + length
+    else:
+        write = _closure[0].write
+
+    if not isinstance(values, MultiDict):
+        values = MultiDict(values)
+
+    for key, values in values.iterlists():
+        for value in values:
+            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
+                  (boundary, key))
+            reader = getattr(value, 'read', None)
+            if reader is not None:
+                filename = getattr(value, 'filename',
+                                   getattr(value, 'name', None))
+                content_type = getattr(value, 'content_type', None)
+                if content_type is None:
+                    content_type = filename and \
+                        mimetypes.guess_type(filename)[0] or \
+                        'application/octet-stream'
+                if filename is not None:
+                    write('; filename="%s"\r\n' % filename)
+                else:
+                    write('\r\n')
+                write('Content-Type: %s\r\n\r\n' % content_type)
+                while 1:
+                    chunk = reader(16384)
+                    if not chunk:
+                        break
+                    write(chunk)
+            else:
+                if isinstance(value, unicode):
+                    value = value.encode(charset)
+                write('\r\n\r\n' + value)
+            write('\r\n')
+    write('--%s--\r\n' % boundary)
+
+    length = int(_closure[0].tell())
+    _closure[0].seek(0)
+    return _closure[0], length, boundary
+
+
+def encode_multipart(values, boundary=None, charset='utf-8'):
+    """Like `stream_encode_multipart` but returns a tuple in the form
+    (``boundary``, ``data``) where data is a bytestring.
+    """
+    stream, length, boundary = stream_encode_multipart(
+        values, use_tempfile=False, boundary=boundary, charset=charset)
+    return boundary, stream.read()
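
For example, the helper above can be used to build a request body by hand
(the field names and contents are illustrative)::

    from cStringIO import StringIO
    from werkzeug.test import encode_multipart
    from werkzeug.datastructures import FileStorage

    boundary, data = encode_multipart({
        'name': u'value',
        'file': FileStorage(StringIO('file contents'), filename='test.txt'),
    })
    content_type = 'multipart/form-data; boundary="%s"' % boundary
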
+
+
+def File(fd, filename=None, mimetype=None):
+    """Backwards compat."""
+    from warnings import warn
+    warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
+                            'EnvironBuilder or FileStorage instead'))
+    return FileStorage(fd, filename=filename, content_type=mimetype)
+
+
+class _TestCookieHeaders(object):
+    """A headers adapter for cookielib
+    """
+
+    def __init__(self, headers):
+        self.headers = headers
+
+    def getheaders(self, name):
+        headers = []
+        name = name.lower()
+        for k, v in self.headers:
+            if k.lower() == name:
+                headers.append(v)
+        return headers
+
+
+class _TestCookieResponse(object):
+    """Something that looks like a httplib.HTTPResponse, but is actually just an
+    adapter for our test responses to make them available for cookielib.
+    """
+
+    def __init__(self, headers):
+        self.headers = _TestCookieHeaders(headers)
+
+    def info(self):
+        return self.headers
+
+
+class _TestCookieJar(CookieJar):
+    """A cookielib.CookieJar modified to inject and read cookie headers from
+    and to wsgi environments, and wsgi application responses.
+    """
+
+    def inject_wsgi(self, environ):
+        """Inject the cookies as client headers into the server's wsgi
+        environment.
+        """
+        cvals = []
+        for cookie in self:
+            cvals.append('%s=%s' % (cookie.name, cookie.value))
+        if cvals:
+            environ['HTTP_COOKIE'] = ', '.join(cvals)
+
+    def extract_wsgi(self, environ, headers):
+        """Extract the server's set-cookie headers as cookies into the
+        cookie jar.
+        """
+        self.extract_cookies(
+            _TestCookieResponse(headers),
+            U2Request(get_current_url(environ)),
+        )
+
+
+def _iter_data(data):
+    """Iterates over a dict or multidict yielding all keys and values.
+    This is used to iterate over the data passed to the
+    :class:`EnvironBuilder`.
+    """
+    if isinstance(data, MultiDict):
+        for key, values in data.iterlists():
+            for value in values:
+                yield key, value
+    else:
+        for key, values in data.iteritems():
+            if isinstance(values, list):
+                for value in values:
+                    yield key, value
+            else:
+                yield key, values
+
+
+class EnvironBuilder(object):
+    """This class can be used to conveniently create a WSGI environment
+    for testing purposes.  It can be used to quickly create WSGI environments
+    or request objects from arbitrary data.
+
+    The signature of this class is also used in some other places as of
+    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
+    :meth:`Client.open`).  Because of this most of the functionality is
+    available through the constructor alone.
+
+    Files and regular form data can be manipulated independently of each
+    other with the :attr:`form` and :attr:`files` attributes, but are
+    passed with the same argument to the constructor: `data`.
+
+    `data` can be any of these values:
+
+    -   a `str`: If it's a string it is converted into an :attr:`input_stream`,
+        the :attr:`content_length` is set and you have to provide a
+        :attr:`content_type`.
+    -   a `dict`: If it's a dict the keys have to be strings and the values
+        any of the following objects:
+
+        -   a :class:`file`-like object.  These are converted into
+            :class:`FileStorage` objects automatically.
+        -   a tuple.  The :meth:`~FileMultiDict.add_file` method is called
+            with the tuple items as positional arguments.
+
+    .. versionadded:: 0.6
+       `path` and `base_url` can now be unicode strings that are encoded using
+       the :func:`iri_to_uri` function.
+
+    :param path: the path of the request.  In the WSGI environment this will
+                 end up as `PATH_INFO`.  If the `query_string` is not defined
+                 and there is a question mark in the `path` everything after
+                 it is used as query string.
+    :param base_url: the base URL is a URL that is used to extract the WSGI
+                     URL scheme, host (server name + server port) and the
+                     script root (`SCRIPT_NAME`).
+    :param query_string: an optional string or dict with URL parameters.
+    :param method: the HTTP method to use, defaults to `GET`.
+    :param input_stream: an optional input stream.  Do not specify this and
+                         `data`.  As soon as an input stream is set you can't
+                         modify :attr:`form` and :attr:`files` unless you
+                         set the :attr:`input_stream` to `None` again.
+    :param content_type: The content type for the request.  As of 0.5 you
+                         don't have to provide this when specifying files
+                         and form data via `data`.
+    :param content_length: The content length for the request.  You don't
+                           have to specify this when providing data via
+                           `data`.
+    :param errors_stream: an optional error stream that is used for
+                          `wsgi.errors`.  Defaults to :data:`stderr`.
+    :param multithread: controls `wsgi.multithread`.  Defaults to `False`.
+    :param multiprocess: controls `wsgi.multiprocess`.  Defaults to `False`.
+    :param run_once: controls `wsgi.run_once`.  Defaults to `False`.
+    :param headers: an optional list or :class:`Headers` object of headers.
+    :param data: a string or dict of form data.  See explanation above.
+    :param environ_base: an optional dict of environment defaults.
+    :param environ_overrides: an optional dict of environment overrides.
+    :param charset: the charset used to encode unicode data.
+    """
+
+    #: the server protocol to use.  defaults to HTTP/1.1
+    server_protocol = 'HTTP/1.1'
+
+    #: the wsgi version to use.  defaults to (1, 0)
+    wsgi_version = (1, 0)
+
+    #: the default request class for :meth:`get_request`
+    request_class = BaseRequest
+
+    def __init__(self, path='/', base_url=None, query_string=None,
+                 method='GET', input_stream=None, content_type=None,
+                 content_length=None, errors_stream=None, multithread=False,
+                 multiprocess=False, run_once=False, headers=None, data=None,
+                 environ_base=None, environ_overrides=None, charset='utf-8'):
+        if query_string is None and '?' in path:
+            path, query_string = path.split('?', 1)
+        self.charset = charset
+        if isinstance(path, unicode):
+            path = iri_to_uri(path, charset)
+        self.path = path
+        if base_url is not None:
+            if isinstance(base_url, unicode):
+                base_url = iri_to_uri(base_url, charset)
+            else:
+                base_url = url_fix(base_url, charset)
+        self.base_url = base_url
+        if isinstance(query_string, basestring):
+            self.query_string = query_string
+        else:
+            if query_string is None:
+                query_string = MultiDict()
+            elif not isinstance(query_string, MultiDict):
+                query_string = MultiDict(query_string)
+            self.args = query_string
+        self.method = method
+        if headers is None:
+            headers = Headers()
+        elif not isinstance(headers, Headers):
+            headers = Headers(headers)
+        self.headers = headers
+        self.content_type = content_type
+        if errors_stream is None:
+            errors_stream = sys.stderr
+        self.errors_stream = errors_stream
+        self.multithread = multithread
+        self.multiprocess = multiprocess
+        self.run_once = run_once
+        self.environ_base = environ_base
+        self.environ_overrides = environ_overrides
+        self.input_stream = input_stream
+        self.content_length = content_length
+        self.closed = False
+
+        if data:
+            if input_stream is not None:
+                raise TypeError('can\'t provide input stream and data')
+            if isinstance(data, basestring):
+                self.input_stream = StringIO(data)
+                if self.content_length is None:
+                    self.content_length = len(data)
+            else:
+                for key, value in _iter_data(data):
+                    if isinstance(value, (tuple, dict)) or \
+                       hasattr(value, 'read'):
+                        self._add_file_from_data(key, value)
+                    else:
+                        self.form.setlistdefault(key).append(value)
+
+    def _add_file_from_data(self, key, value):
+        """Called in the EnvironBuilder to add files from the data dict."""
+        if isinstance(value, tuple):
+            self.files.add_file(key, *value)
+        elif isinstance(value, dict):
+            from warnings import warn
+            warn(DeprecationWarning('it\'s no longer possible to pass dicts '
+                                    'as `data`.  Use tuples or FileStorage '
+                                    'objects instead'), stacklevel=2)
+            value = dict(value)
+            mimetype = value.pop('mimetype', None)
+            if mimetype is not None:
+                value['content_type'] = mimetype
+            self.files.add_file(key, **value)
+        else:
+            self.files.add_file(key, value)
+
+    def _get_base_url(self):
+        return urlparse.urlunsplit((self.url_scheme, self.host,
+                                    self.script_root, '', '')).rstrip('/') + '/'
+
+    def _set_base_url(self, value):
+        if value is None:
+            scheme = 'http'
+            netloc = 'localhost'
+            script_root = ''
+        else:
+            scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(value)
+            if qs or anchor:
+                raise ValueError('base url must not contain a query string '
+                                 'or fragment')
+        self.script_root = script_root.rstrip('/')
+        self.host = netloc
+        self.url_scheme = scheme
+
+    base_url = property(_get_base_url, _set_base_url, doc='''
+        The base URL is a URL that is used to extract the WSGI
+        URL scheme, host (server name + server port) and the
+        script root (`SCRIPT_NAME`).''')
+    del _get_base_url, _set_base_url
+
+    def _get_content_type(self):
+        ct = self.headers.get('Content-Type')
+        if ct is None and not self._input_stream:
+            if self.method in ('POST', 'PUT'):
+                if self._files:
+                    return 'multipart/form-data'
+                return 'application/x-www-form-urlencoded'
+            return None
+        return ct
+
+    def _set_content_type(self, value):
+        if value is None:
+            self.headers.pop('Content-Type', None)
+        else:
+            self.headers['Content-Type'] = value
+
+    content_type = property(_get_content_type, _set_content_type, doc='''
+        The content type for the request.  Reflected from and to the
+        :attr:`headers`.  Do not set if you set :attr:`files` or
+        :attr:`form` for auto detection.''')
+    del _get_content_type, _set_content_type
+
+    def _get_content_length(self):
+        return self.headers.get('Content-Length', type=int)
+
+    def _set_content_length(self, value):
+        if value is None:
+            self.headers.pop('Content-Length', None)
+        else:
+            self.headers['Content-Length'] = str(value)
+
+    content_length = property(_get_content_length, _set_content_length, doc='''
+        The content length as integer.  Reflected from and to the
+        :attr:`headers`.  Do not set if you set :attr:`files` or
+        :attr:`form` for auto detection.''')
+    del _get_content_length, _set_content_length
+
+    def form_property(name, storage, doc):
+        key = '_' + name
+        def getter(self):
+            if self._input_stream is not None:
+                raise AttributeError('an input stream is defined')
+            rv = getattr(self, key)
+            if rv is None:
+                rv = storage()
+                setattr(self, key, rv)
+            return rv
+        def setter(self, value):
+            self._input_stream = None
+            setattr(self, key, value)
+        return property(getter, setter, doc)
+
+    form = form_property('form', MultiDict, doc='''
+        A :class:`MultiDict` of form values.''')
+    files = form_property('files', FileMultiDict, doc='''
+        A :class:`FileMultiDict` of uploaded files.  You can use the
+        :meth:`~FileMultiDict.add_file` method to add new files to the
+        dict.''')
+    del form_property
+
+    def _get_input_stream(self):
+        return self._input_stream
+
+    def _set_input_stream(self, value):
+        self._input_stream = value
+        self._form = self._files = None
+
+    input_stream = property(_get_input_stream, _set_input_stream, doc='''
+        An optional input stream.  If you set this it will clear
+        :attr:`form` and :attr:`files`.''')
+    del _get_input_stream, _set_input_stream
+
+    def _get_query_string(self):
+        if self._query_string is None:
+            if self._args is not None:
+                return url_encode(self._args, charset=self.charset)
+            return ''
+        return self._query_string
+
+    def _set_query_string(self, value):
+        self._query_string = value
+        self._args = None
+
+    query_string = property(_get_query_string, _set_query_string, doc='''
+        The query string.  If you set this to a string :attr:`args` will
+        no longer be available.''')
+    del _get_query_string, _set_query_string
+
+    def _get_args(self):
+        if self._query_string is not None:
+            raise AttributeError('a query string is defined')
+        if self._args is None:
+            self._args = MultiDict()
+        return self._args
+
+    def _set_args(self, value):
+        self._query_string = None
+        self._args = value
+
+    args = property(_get_args, _set_args, doc='''
+        The URL arguments as :class:`MultiDict`.''')
+    del _get_args, _set_args
+
+    @property
+    def server_name(self):
+        """The server name (read-only, use :attr:`host` to set)"""
+        return self.host.split(':', 1)[0]
+
+    @property
+    def server_port(self):
+        """The server port as integer (read-only, use :attr:`host` to set)"""
+        pieces = self.host.split(':', 1)
+        if len(pieces) == 2 and pieces[1].isdigit():
+            return int(pieces[1])
+        elif self.url_scheme == 'https':
+            return 443
+        return 80
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """Closes all files.  If you put real :class:`file` objects into the
+        :attr:`files` dict you can call this method to automatically close
+        them all in one go.
+        """
+        if self.closed:
+            return
+        try:
+            files = self.files.itervalues()
+        except AttributeError:
+            files = ()
+        for f in files:
+            try:
+                f.close()
+            except Exception, e:
+                pass
+        self.closed = True
+
+    def get_environ(self):
+        """Return the built environ."""
+        input_stream = self.input_stream
+        content_length = self.content_length
+        content_type = self.content_type
+
+        if input_stream is not None:
+            start_pos = input_stream.tell()
+            input_stream.seek(0, 2)
+            end_pos = input_stream.tell()
+            input_stream.seek(start_pos)
+            content_length = end_pos - start_pos
+        elif content_type == 'multipart/form-data':
+            values = CombinedMultiDict([self.form, self.files])
+            input_stream, content_length, boundary = \
+                stream_encode_multipart(values, charset=self.charset)
+            content_type += '; boundary="%s"' % boundary
+        elif content_type == 'application/x-www-form-urlencoded':
+            values = url_encode(self.form, charset=self.charset)
+            content_length = len(values)
+            input_stream = StringIO(values)
+        else:
+            input_stream = _empty_stream
+
+        result = {}
+        if self.environ_base:
+            result.update(self.environ_base)
+
+        def _path_encode(x):
+            if isinstance(x, unicode):
+                x = x.encode(self.charset)
+            return _unquote(x)
+
+        result.update({
+            'REQUEST_METHOD':       self.method,
+            'SCRIPT_NAME':          _path_encode(self.script_root),
+            'PATH_INFO':            _path_encode(self.path),
+            'QUERY_STRING':         self.query_string,
+            'SERVER_NAME':          self.server_name,
+            'SERVER_PORT':          str(self.server_port),
+            'HTTP_HOST':            self.host,
+            'SERVER_PROTOCOL':      self.server_protocol,
+            'CONTENT_TYPE':         content_type or '',
+            'CONTENT_LENGTH':       str(content_length or '0'),
+            'wsgi.version':         self.wsgi_version,
+            'wsgi.url_scheme':      self.url_scheme,
+            'wsgi.input':           input_stream,
+            'wsgi.errors':          self.errors_stream,
+            'wsgi.multithread':     self.multithread,
+            'wsgi.multiprocess':    self.multiprocess,
+            'wsgi.run_once':        self.run_once
+        })
+        for key, value in self.headers.to_list(self.charset):
+            result['HTTP_%s' % key.upper().replace('-', '_')] = value
+        if self.environ_overrides:
+            result.update(self.environ_overrides)
+        return result
+
+    def get_request(self, cls=None):
+        """Returns a request with the data.  If the request class is not
+        specified :attr:`request_class` is used.
+
+        :param cls: The request wrapper to use.
+        """
+        if cls is None:
+            cls = self.request_class
+        return cls(self.get_environ())
+
+
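+# Editor's note: the following sketch is an illustrative addition, not part
+# of the upstream Werkzeug module.  It shows one way the builder above can
+# produce a WSGI environ; the helper name and argument values are made up
+# for the example, the keyword arguments mirror those used by
+# `create_environ` and `Client.open` below.
+def _environ_builder_sketch():
+    builder = EnvironBuilder(path='/index', base_url='http://localhost/',
+                             query_string='answer=42')
+    try:
+        environ = builder.get_environ()
+    finally:
+        builder.close()
+    # environ['PATH_INFO'] == '/index', environ['QUERY_STRING'] == 'answer=42'
+    return environ
+
+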
+class ClientRedirectError(Exception):
+    """
+    If a redirect loop is detected when using follow_redirects=True with
+    the :class:`Client`, then this exception is raised.
+    """
+
+
+class Client(object):
+    """This class allows to send requests to a wrapped application.
+
+    The response wrapper can be a class or factory function that takes
+    three arguments: app_iter, status and headers.  The default response
+    wrapper just returns a tuple.
+
+    Example::
+
+        class ClientResponse(BaseResponse):
+            ...
+
+        client = Client(MyApplication(), response_wrapper=ClientResponse)
+
+    The use_cookies parameter indicates whether cookies should be stored and
+    sent for subsequent requests. This is True by default, but passing False
+    will disable this behaviour.
+
+    .. versionadded:: 0.5
+       `use_cookies` is new in this version.  Older versions did not provide
+       builtin cookie support.
+    """
+
+    def __init__(self, application, response_wrapper=None, use_cookies=True):
+        self.application = application
+        if response_wrapper is None:
+            response_wrapper = lambda a, s, h: (a, s, h)
+        self.response_wrapper = response_wrapper
+        if use_cookies:
+            self.cookie_jar = _TestCookieJar()
+        else:
+            self.cookie_jar = None
+        self.redirect_client = None
+
+    def open(self, *args, **kwargs):
+        """Takes the same arguments as the :class:`EnvironBuilder` class with
+        some additions:  You can provide a :class:`EnvironBuilder` or a WSGI
+        environment as only argument instead of the :class:`EnvironBuilder`
+        arguments and two optional keyword arguments (`as_tuple`, `buffered`)
+        that change the type of the return value or the way the application is
+        executed.
+
+        .. versionchanged:: 0.5
+           If a dict is provided as a file in the dict for the `data`
+           parameter, the content type has to be called `content_type` now
+           instead of `mimetype`.  This change was made for consistency with
+           :class:`werkzeug.FileWrapper`.
+
+           The `follow_redirects` parameter was added to :func:`open`.
+
+        Additional parameters:
+
+        :param as_tuple: Returns a tuple in the form ``(environ, result)``
+        :param buffered: Set this to True to buffer the application run.
+                         This will automatically close the application for
+                         you as well.
+        :param follow_redirects: Set this to True if the `Client` should
+                                 follow HTTP redirects.
+        """
+        as_tuple = kwargs.pop('as_tuple', False)
+        buffered = kwargs.pop('buffered', False)
+        follow_redirects = kwargs.pop('follow_redirects', False)
+        environ = None
+        if not kwargs and len(args) == 1:
+            if isinstance(args[0], EnvironBuilder):
+                environ = args[0].get_environ()
+            elif isinstance(args[0], dict):
+                environ = args[0]
+        if environ is None:
+            builder = EnvironBuilder(*args, **kwargs)
+            try:
+                environ = builder.get_environ()
+            finally:
+                builder.close()
+
+        if self.cookie_jar is not None:
+            self.cookie_jar.inject_wsgi(environ)
+        rv = run_wsgi_app(self.application, environ, buffered=buffered)
+        if self.cookie_jar is not None:
+            self.cookie_jar.extract_wsgi(environ, rv[2])
+
+        # handle redirects
+        redirect_chain = []
+        status_code = int(rv[1].split(None, 1)[0])
+        while status_code in (301, 302, 303, 305, 307) and follow_redirects:
+            if not self.redirect_client:
+                # assume that we're not using the user defined response wrapper
+                # so that we don't need any ugly hacks to get the status
+                # code from the response.
+                self.redirect_client = Client(self.application)
+                self.redirect_client.cookie_jar = self.cookie_jar
+
+            redirect = dict(rv[2])['Location']
+            scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(redirect)
+            base_url = urlparse.urlunsplit((scheme, netloc, '', '', '')).rstrip('/') + '/'
+            host = get_host(create_environ('/', base_url, query_string=qs)).split(':', 1)[0]
+            if get_host(environ).split(':', 1)[0] != host:
+                raise RuntimeError('%r does not support redirect to '
+                                   'external targets' % self.__class__)
+
+            redirect_chain.append((redirect, status_code))
+
+            # the redirect request should be a new request, and not be based on
+            # the old request
+            redirect_kwargs = {
+                'path':             script_root,
+                'base_url':         base_url,
+                'query_string':     qs,
+                'as_tuple':         True,
+                'buffered':         buffered,
+                'follow_redirects': False,
+            }
+            environ, rv = self.redirect_client.open(**redirect_kwargs)
+            status_code = int(rv[1].split(None, 1)[0])
+
+            # Prevent loops
+            if redirect_chain[-1] in redirect_chain[:-1]:
+                raise ClientRedirectError("loop detected")
+
+        response = self.response_wrapper(*rv)
+        if as_tuple:
+            return environ, response
+        return response
+
+    def get(self, *args, **kw):
+        """Like open but method is enforced to GET."""
+        kw['method'] = 'GET'
+        return self.open(*args, **kw)
+
+    def post(self, *args, **kw):
+        """Like open but method is enforced to POST."""
+        kw['method'] = 'POST'
+        return self.open(*args, **kw)
+
+    def head(self, *args, **kw):
+        """Like open but method is enforced to HEAD."""
+        kw['method'] = 'HEAD'
+        return self.open(*args, **kw)
+
+    def put(self, *args, **kw):
+        """Like open but method is enforced to PUT."""
+        kw['method'] = 'PUT'
+        return self.open(*args, **kw)
+
+    def delete(self, *args, **kw):
+        """Like open but method is enforced to DELETE."""
+        kw['method'] = 'DELETE'
+        return self.open(*args, **kw)
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.application
+        )
+
+
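+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# It exercises the `Client` above against a tiny WSGI application defined
+# inline for the example; with the default response wrapper the return value
+# is an ``(app_iter, status, headers)`` tuple.
+def _client_sketch():
+    def hello_app(environ, start_response):
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return ['Hello World!']
+    client = Client(hello_app)
+    app_iter, status, headers = client.get('/hello')
+    # status == '200 OK', ''.join(app_iter) == 'Hello World!'
+    return status, ''.join(app_iter)
+
+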
+def create_environ(*args, **kwargs):
+    """Create a new WSGI environ dict based on the values passed.  The first
+    parameter should be the path of the request which defaults to '/'.  The
+    second one can either be an absolute path (in that case the host is
+    localhost:80) or a full path to the request with scheme, netloc port and
+    the path to the script.
+
+    This accepts the same arguments as the :class:`EnvironBuilder`
+    constructor.
+
+    .. versionchanged:: 0.5
+       This function is now a thin wrapper over :class:`EnvironBuilder` which
+       was added in 0.5.  The `headers`, `environ_base`, `environ_overrides`
+       and `charset` parameters were added.
+    """
+    builder = EnvironBuilder(*args, **kwargs)
+    try:
+        return builder.get_environ()
+    finally:
+        builder.close()
+
+
+def run_wsgi_app(app, environ, buffered=False):
+    """Return a tuple in the form (app_iter, status, headers) of the
+    application output.  This works best if you pass it an application that
+    returns an iterator all the time.
+
+    Sometimes applications may use the `write()` callable returned
+    by the `start_response` function.  This tries to resolve such edge
+    cases automatically.  But if you don't get the expected output you
+    should set `buffered` to `True` which enforces buffering.
+
+    If passed an invalid WSGI application the behavior of this function is
+    undefined.  Never pass non-conforming WSGI applications to this function.
+
+    :param app: the application to execute.
+    :param buffered: set to `True` to enforce buffering.
+    :return: tuple in the form ``(app_iter, status, headers)``
+    """
+    environ = _get_environ(environ)
+    response = []
+    buffer = []
+
+    def start_response(status, headers, exc_info=None):
+        if exc_info is not None:
+            raise exc_info[0], exc_info[1], exc_info[2]
+        response[:] = [status, headers]
+        return buffer.append
+
+    app_iter = app(environ, start_response)
+
+    # when buffering we emit the close call early and convert the
+    # application iterator into a regular list
+    if buffered:
+        close_func = getattr(app_iter, 'close', None)
+        try:
+            app_iter = list(app_iter)
+        finally:
+            if close_func is not None:
+                close_func()
+
+    # otherwise we iterate the application iter until we have
+    # a response, chain the already received data with the already
+    # collected data and wrap it in a new `ClosingIterator` if
+    # we have a close callable.
+    else:
+        while not response:
+            buffer.append(app_iter.next())
+        if buffer:
+            close_func = getattr(app_iter, 'close', None)
+            app_iter = chain(buffer, app_iter)
+            if close_func is not None:
+                app_iter = ClosingIterator(app_iter, close_func)
+
+    return app_iter, response[0], response[1]
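+
+
+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# `create_environ` and `run_wsgi_app` combined: build an environ by hand and
+# run an application (defined inline for the example) against it without
+# going through `Client`.
+def _run_wsgi_app_sketch():
+    def echo_method(environ, start_response):
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return [environ['REQUEST_METHOD']]
+    environ = create_environ('/ping', 'http://localhost:8080/', method='POST')
+    app_iter, status, headers = run_wsgi_app(echo_method, environ)
+    # status == '200 OK', list(app_iter) == ['POST']
+    return status, list(app_iter)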
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/testapp.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.testapp
+    ~~~~~~~~~~~~~~~~
+
+    Provide a small test application that can be used to test a WSGI server
+    and check it for WSGI compliance.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+from werkzeug.templates import Template
+from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
+
+
+logo = Response('''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
+//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
+nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
+7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
+ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
+m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
+p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
+SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
+78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
+ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
+tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
+w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
+lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
+Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
+yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
+dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
+idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
+EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
+ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
+gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
+JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
+Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
+YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
+c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
+qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
+cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
+cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
+KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
+EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
+UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
+Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
+aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
+kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
+='''.decode('base64'), mimetype='image/png')
+
+
+TEMPLATE = Template(ur'''\
+<%py
+    import sys, os
+    from textwrap import wrap
+    import werkzeug
+    from werkzeug.testapp import iter_sys_path
+    try:
+        import pkg_resources
+    except ImportError:
+        eggs = None
+    else:
+        eggs = list(pkg_resources.working_set)
+        eggs.sort(lambda a, b: cmp(a.project_name.lower(),
+                                   b.project_name.lower()))
+    sorted_environ = req.environ.items()
+    sorted_environ.sort(lambda a, b: cmp(str(a[0]).lower(), str(b[0]).lower()))
+%>
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+  "http://www.w3.org/TR/html4/loose.dtd">
+<title>WSGI Information</title>
+<style type="text/css">
+  body       { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+               'Verdana', sans-serif; background-color: #AFC1C4; color: #000;
+               text-align: center; margin: 1em; padding: 0; }
+  #logo      { float: right; padding: 10px; }
+  div.box    { text-align: left; width: 45em; padding: 1em; margin: auto;
+               border: 1px solid #aaa; background-color: white; }
+  h1         { color: #11557C; font-size: 2em; margin: 0 0 0.8em 0; }
+  h2         { font-size: 1.4em; margin: 1em 0 0.5em 0; }
+  table      { width: 100%; border-collapse: collapse; border: 1px solid #AFC5C9 }
+  table th   { background-color: #AFC1C4; color: white; font-size: 0.72em;
+               font-weight: normal; width: 18em; vertical-align: top;
+               padding: 0.5em 0 0.1em 0.5em; }
+  table td   { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
+  code       { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
+               monospace; font-size: 0.7em; }
+  ul li      { line-height: 1.5em; }
+  ul.path    { font-size: 0.7em; margin: 0; padding: 8px; list-style: none;
+               background: #E9F5F7; border: 1px solid #AFC5C9; }
+  ul.path li { line-height: 1.6em; }
+  li.virtual { color: #999; text-decoration: underline; }
+  li.exp     { background: white; }
+</style>
+<div class="box">
+  <img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
+  <h1>WSGI Information</h1>
+  <p>
+    This page displays all available information about the WSGI server and
+    the underlying Python interpreter.
+  <h2 id="python-interpreter">Python Interpreter</h2>
+  <table>
+    <tr>
+      <th>Python Version</th>
+      <td>${'<br>'.join(escape(sys.version).splitlines())}</td>
+    </tr>
+    <tr>
+      <th>Platform</th>
+      <td>$escape(sys.platform) [$escape(os.name)]</td>
+    </tr>
+    <tr>
+      <th>API Version</th>
+      <td>$sys.api_version</td>
+    </tr>
+    <tr>
+      <th>Byteorder</th>
+      <td>$sys.byteorder</td>
+    </tr>
+    <tr>
+      <th>Werkzeug Version</th>
+      <td>$escape(werkzeug.__version__)</td>
+    </tr>
+  </table>
+  <h2 id="wsgi-environment">WSGI Environment</h2>
+  <table>
+  <% for key, value in sorted_environ %>
+    <tr>
+      <th>$escape(str(key))</th>
+      <td><code>${' '.join(wrap(escape(repr(value))))}</code></td>
+    </tr>
+  <% endfor %>
+  </table>
+  <% if eggs %>
+  <h2 id="installed-eggs">Installed Eggs</h2>
+  <p>
+    The following python packages were installed on the system as
+    Python eggs:
+  <ul>
+  <% for egg in eggs %>
+    <li>$escape(egg.project_name) <small>[$escape(egg.version)]</small></li>
+  <% endfor %>
+  </ul>
+  <% endif %>
+  <h2 id="sys-path">Package Load Path</h2>
+  <p>
+    The following paths are the current contents of the load path.  These
+    entries are searched for Python packages.  Note that not all items in
+    this path are folders.  Gray and underlined items are entries pointing
+    to invalid resources or used by custom import hooks such as the zip
+    importer.
+  <p>
+    Items with a bright background were expanded for display from a relative
+    path.  If you encounter such paths in the output you might want to check
+    your setup as relative paths are usually problematic in multithreaded
+    environments.
+  <ul class="path">
+  <% for item, virtual, expanded in iter_sys_path() %>
+    <%py
+      class_ = []
+      if virtual:
+          class_.append('virtual')
+      if expanded:
+          class_.append('exp')
+      class_ = ' '.join(class_)
+    %>
+    <li<% if class_ %> class="$class_"<% endif %>>$escape(item)</li>
+  <% endfor %>
+  </ul>
+</div>''')
+
+
+def iter_sys_path():
+    if os.name == 'posix':
+        def strip(x):
+            prefix = os.path.expanduser('~')
+            if x.startswith(prefix):
+                x = '~' + x[len(prefix):]
+            return x
+    else:
+        strip = lambda x: x
+
+    cwd = os.path.abspath(os.getcwd())
+    for item in sys.path:
+        path = os.path.join(cwd, item or os.path.curdir)
+        yield strip(os.path.normpath(path)), \
+              not os.path.isdir(path), path != item
+
+
+def test_app(environ, start_response):
+    """Simple test application that dumps the environment.  You can use
+    it to check if Werkzeug is working properly:
+
+    .. sourcecode:: pycon
+
+        >>> from werkzeug import run_simple, test_app
+        >>> run_simple('localhost', 3000, test_app)
+         * Running on http://localhost:3000/
+
+    The application displays important information from the WSGI environment,
+    the Python interpreter and the installed libraries.
+    """
+    req = Request(environ, populate_request=False)
+    if req.args.get('resource') == 'logo':
+        response = logo
+    else:
+        response = Response(TEMPLATE.render(req=req), mimetype='text/html')
+    return response(environ, start_response)
+
+
+if __name__ == '__main__':
+    from werkzeug.serving import run_simple
+    run_simple('localhost', 5000, test_app, use_reloader=True)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/urls.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,463 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.urls
+    ~~~~~~~~~~~~~
+
+    This module implements various URL related functions.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import urlparse
+
+from werkzeug._internal import _decode_unicode
+from werkzeug.datastructures import MultiDict, iter_multi_items
+
+
+#: list of characters that are always safe in URLs.
+_always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                'abcdefghijklmnopqrstuvwxyz'
+                '0123456789_.-')
+_safe_map = dict((c, c) for c in _always_safe)
+for i in xrange(0x80):
+    c = chr(i)
+    if c not in _safe_map:
+        _safe_map[c] = '%%%02X' % i
+_safe_map.update((chr(i), '%%%02X' % i) for i in xrange(0x80, 0x100))
+_safemaps = {}
+
+#: lookup table for encoded characters.
+_hexdig = '0123456789ABCDEFabcdef'
+_hextochr = dict((a + b, chr(int(a + b, 16)))
+                 for a in _hexdig for b in _hexdig)
+
+
+def _quote(s, safe='/', _join=''.join):
+    assert isinstance(s, str), 'quote only works on bytes'
+    if not s or not s.rstrip(_always_safe + safe):
+        return s
+    try:
+        quoter = _safemaps[safe]
+    except KeyError:
+        safe_map = _safe_map.copy()
+        safe_map.update([(c, c) for c in safe])
+        _safemaps[safe] = quoter = safe_map.__getitem__
+    return _join(map(quoter, s))
+
+
+def _quote_plus(s, safe=''):
+    if ' ' in s:
+        return _quote(s, safe + ' ').replace(' ', '+')
+    return _quote(s, safe)
+
+
+def _safe_urlsplit(s):
+    """the urlparse.urlsplit cache breaks if it contains unicode and
+    we cannot control that.  So we force type cast that thing back
+    to what we think it is.
+    """
+    rv = urlparse.urlsplit(s)
+    if type(rv[1]) is not type(s):
+        try:
+            return tuple(map(type(s), rv))
+        except UnicodeError:
+            # oh well, we most likely will break later again, but
+            # let's just say it worked out well to that point.
+            pass
+    return rv
+
+
+def _unquote(s, unsafe=''):
+    assert isinstance(s, str), 'unquote only works on bytes'
+    rv = s.split('%')
+    if len(rv) == 1:
+        return s
+    s = rv[0]
+    for item in rv[1:]:
+        try:
+            char = _hextochr[item[:2]]
+            if char in unsafe:
+                raise KeyError()
+            s += char + item[2:]
+        except KeyError:
+            s += '%' + item
+    return s
+
+
+def _unquote_plus(s):
+    return _unquote(s.replace('+', ' '))
+
+
+def _uri_split(uri):
+    """Splits up an URI or IRI."""
+    scheme, netloc, path, query, fragment = _safe_urlsplit(uri)
+
+    port = None
+
+    if '@' in netloc:
+        auth, hostname = netloc.split('@', 1)
+    else:
+        auth = None
+        hostname = netloc
+    if hostname:
+        if ':' in hostname:
+            hostname, port = hostname.split(':', 1)
+    return scheme, auth, hostname, port, path, query, fragment
+
+
+def iri_to_uri(iri, charset='utf-8'):
+    r"""Converts any unicode based IRI to an acceptable ASCII URI.  Werkzeug
+    always uses utf-8 URLs internally because this is what browsers and HTTP
+    do as well.  In some places where it accepts a URL it also accepts a
+    unicode IRI and converts it into a URI.
+
+    Examples for IRI versus URI:
+
+    >>> iri_to_uri(u'http://☃.net/')
+    'http://xn--n3h.net/'
+    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
+    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
+
+    .. versionadded:: 0.6
+
+    :param iri: the iri to convert
+    :param charset: the charset for the URI
+    """
+    iri = unicode(iri)
+    scheme, auth, hostname, port, path, query, fragment = _uri_split(iri)
+
+    scheme = scheme.encode('ascii')
+    hostname = hostname.encode('idna')
+    if auth:
+        if ':' in auth:
+            auth, password = auth.split(':', 1)
+        else:
+            password = None
+        auth = _quote(auth.encode(charset))
+        if password:
+            auth += ':' + _quote(password.encode(charset))
+        hostname = auth + '@' + hostname
+    if port:
+        hostname += ':' + port
+
+    path = _quote(path.encode(charset), safe="/:~+")
+    query = _quote(query.encode(charset), safe="=%&[]:;$()+,!?*/")
+
+    return urlparse.urlunsplit([scheme, hostname, path, query, fragment])
+
+
+def uri_to_iri(uri, charset='utf-8', errors='ignore'):
+    r"""Converts a URI in a given charset to a IRI.
+
+    Examples for URI versus IRI
+
+    >>> uri_to_iri('http://xn--n3h.net/')
+    u'http://\u2603.net/'
+    >>> uri_to_iri('http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
+    u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'
+
+    Query strings are left unchanged:
+
+    >>> uri_to_iri('/?foo=24&x=%26%2f')
+    u'/?foo=24&x=%26%2f'
+
+    .. versionadded:: 0.6
+
+    :param uri: the URI to convert
+    :param charset: the charset of the URI
+    :param errors: the error handling on decode
+    """
+    uri = url_fix(str(uri), charset)
+    scheme, auth, hostname, port, path, query, fragment = _uri_split(uri)
+
+    scheme = _decode_unicode(scheme, 'ascii', errors)
+
+    try:
+        hostname = hostname.decode('idna')
+    except UnicodeError:
+        # the IDNA codec raised an error.  Because it does not support
+        # any error handling we have to fake it ourselves, badly.
+        if errors not in ('ignore', 'replace'):
+            raise
+        hostname = hostname.decode('ascii', errors)
+
+    if auth:
+        if ':' in auth:
+            auth, password = auth.split(':', 1)
+        else:
+            password = None
+        auth = _decode_unicode(_unquote(auth), charset, errors)
+        if password:
+            auth += u':' + _decode_unicode(_unquote(password),
+                                           charset, errors)
+        hostname = auth + u'@' + hostname
+    if port:
+        # port should be numeric, but you never know...
+        hostname += u':' + port.decode(charset, errors)
+
+    path = _decode_unicode(_unquote(path, '/;?'), charset, errors)
+    query = _decode_unicode(_unquote(query, ';/?:@&=+,$'),
+                            charset, errors)
+
+    return urlparse.urlunsplit([scheme, hostname, path, query, fragment])
+
+
+def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
+               errors='ignore', separator='&', cls=None):
+    """Parse a querystring and return it as :class:`MultiDict`.  Per default
+    only values are decoded into unicode strings.  If `decode_keys` is set to
+    `True` the same will happen for keys.
+
+    Per default a missing value for a key will default to an empty string.  If
+    you don't want that behavior you can set `include_empty` to `False`.
+
+    Per default encoding errors are ignored.  If you want a different behavior
+    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
+    `HTTPUnicodeError` is raised.
+
+    .. versionchanged:: 0.5
+       In previous versions ";" and "&" could be used for url decoding.
+       This changed in 0.5 where only "&" is supported.  If you want to
+       use ";" instead a different `separator` can be provided.
+
+       The `cls` parameter was added.
+
+    :param s: a string with the query string to decode.
+    :param charset: the charset of the query string.
+    :param decode_keys: set to `True` if you want the keys to be decoded
+                        as well.
+    :param include_empty: Set to `False` if you don't want empty values to
+                          appear in the dict.
+    :param errors: the decoding error behavior.
+    :param separator: the pair separator to be used, defaults to ``&``
+    :param cls: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
+    """
+    if cls is None:
+        cls = MultiDict
+    result = []
+    for pair in str(s).split(separator):
+        if not pair:
+            continue
+        if '=' in pair:
+            key, value = pair.split('=', 1)
+        else:
+            key = pair
+            value = ''
+        key = _unquote_plus(key)
+        if decode_keys:
+            key = _decode_unicode(key, charset, errors)
+        result.append((key, url_unquote_plus(value, charset, errors)))
+    return cls(result)
+
+
+def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
+               separator='&'):
+    """URL encode a dict/`MultiDict`.  If a value is `None` it will not appear
+    in the result string.  Per default only values are encoded into the target
+    charset.  If `encode_keys` is set to ``True`` unicode keys are
+    supported too.
+
+    If `sort` is set to `True` the items are sorted by `key` or the default
+    sorting algorithm.
+
+    .. versionadded:: 0.5
+        `sort`, `key`, and `separator` were added.
+
+    :param obj: the object to encode into a query string.
+    :param charset: the charset of the query string.
+    :param encode_keys: set to `True` if you have unicode keys.
+    :param sort: set to `True` if you want parameters to be sorted by `key`.
+    :param separator: the separator to be used for the pairs.
+    :param key: an optional function to be used for sorting.  For more details
+                check out the :func:`sorted` documentation.
+    """
+    iterable = iter_multi_items(obj)
+    if sort:
+        iterable = sorted(iterable, key=key)
+    tmp = []
+    for key, value in iterable:
+        if value is None:
+            continue
+        if encode_keys and isinstance(key, unicode):
+            key = key.encode(charset)
+        else:
+            key = str(key)
+        if isinstance(value, unicode):
+            value = value.encode(charset)
+        else:
+            value = str(value)
+        tmp.append('%s=%s' % (_quote(key),
+                              _quote_plus(value)))
+    return separator.join(tmp)
+
+
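+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# A round trip through `url_encode` and `url_decode`: with ``sort=True`` the
+# pairs come out in a deterministic order, and decoding yields a `MultiDict`
+# as documented above.
+def _url_codec_sketch():
+    qs = url_encode({'name': u'M\xfcller', 'page': 2}, sort=True)
+    # qs == 'name=M%C3%BCller&page=2'
+    return url_decode(qs)
+
+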
+def url_quote(s, charset='utf-8', safe='/:'):
+    """URL encode a single string with a given encoding.
+
+    :param s: the string to quote.
+    :param charset: the charset to be used.
+    :param safe: an optional sequence of safe characters.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset)
+    elif not isinstance(s, str):
+        s = str(s)
+    return _quote(s, safe=safe)
+
+
+def url_quote_plus(s, charset='utf-8', safe=''):
+    """URL encode a single string with the given encoding and convert
+    whitespace to "+".
+
+    :param s: the string to quote.
+    :param charset: the charset to be used.
+    :param safe: an optional sequence of safe characters.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset)
+    elif not isinstance(s, str):
+        s = str(s)
+    return _quote_plus(s, safe=safe)
+
+
+def url_unquote(s, charset='utf-8', errors='ignore'):
+    """URL decode a single string with a given decoding.
+
+    Per default encoding errors are ignored.  If you want a different behavior
+    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
+    `HTTPUnicodeError` is raised.
+
+    :param s: the string to unquote.
+    :param charset: the charset to be used.
+    :param errors: the error handling for the charset decoding.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset)
+    return _decode_unicode(_unquote(s), charset, errors)
+
+
+def url_unquote_plus(s, charset='utf-8', errors='ignore'):
+    """URL decode a single string with the given decoding and decode
+    a "+" to whitespace.
+
+    Per default encoding errors are ignored.  If you want a different behavior
+    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
+    `HTTPUnicodeError` is raised.
+
+    :param s: the string to unquote.
+    :param charset: the charset to be used.
+    :param errors: the error handling for the charset decoding.
+    """
+    return _decode_unicode(_unquote_plus(s), charset, errors)
+
+
+def url_fix(s, charset='utf-8'):
+    r"""Sometimes you get an URL by a user that just isn't a real URL because
+    it contains unsafe characters like ' ' and so on.  This function can fix
+    some of the problems in a similar way browsers handle data entered by the
+    user:
+
+    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
+    'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
+
+    :param s: the string with the URL to fix.
+    :param charset: The target charset for the URL if the URL was given as
+                    a unicode string.
+    """
+    if isinstance(s, unicode):
+        s = s.encode(charset, 'ignore')
+    scheme, netloc, path, qs, anchor = _safe_urlsplit(s)
+    path = _quote(path, '/%')
+    qs = _quote_plus(qs, ':&%=')
+    return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
+
+
+class Href(object):
+    """Implements a callable that constructs URLs with the given base. The
+    function can be called with any number of positional and keyword
+    arguments which than are used to assemble the URL.  Works with URLs
+    and posix paths.
+
+    Positional arguments are appended as individual segments to
+    the path of the URL:
+
+    >>> href = Href('/foo')
+    >>> href('bar', 23)
+    '/foo/bar/23'
+    >>> href('foo', bar=23)
+    '/foo/foo?bar=23'
+
+    If any of the arguments (positional or keyword) evaluates to `None` it
+    will be skipped.  If no keyword arguments are given the last argument
+    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
+    otherwise the keyword arguments are used for the query parameters, cutting
+    off the first trailing underscore of the parameter name:
+
+    >>> href(is_=42)
+    '/foo?is=42'
+    >>> href({'foo': 'bar'})
+    '/foo?foo=bar'
+
+    Combining both methods is not allowed:
+
+    >>> href({'foo': 'bar'}, bar=42)
+    Traceback (most recent call last):
+      ...
+    TypeError: keyword arguments and query-dicts can't be combined
+
+    Accessing attributes on the href object creates a new href object with
+    the attribute name as prefix:
+
+    >>> bar_href = href.bar
+    >>> bar_href("blub")
+    '/foo/bar/blub'
+
+    If `sort` is set to `True` the items are sorted by `key` or the default
+    sorting algorithm:
+
+    >>> href = Href("/", sort=True)
+    >>> href(a=1, b=2, c=3)
+    '/?a=1&b=2&c=3'
+
+    .. versionadded:: 0.5
+        `sort` and `key` were added.
+    """
+
+    def __init__(self, base='./', charset='utf-8', sort=False, key=None):
+        if not base:
+            base = './'
+        self.base = base
+        self.charset = charset
+        self.sort = sort
+        self.key = key
+
+    def __getattr__(self, name):
+        if name[:2] == '__':
+            raise AttributeError(name)
+        base = self.base
+        if base[-1:] != '/':
+            base += '/'
+        return Href(urlparse.urljoin(base, name), self.charset, self.sort,
+                    self.key)
+
+    def __call__(self, *path, **query):
+        if path and isinstance(path[-1], dict):
+            if query:
+                raise TypeError('keyword arguments and query-dicts '
+                                'can\'t be combined')
+            query, path = path[-1], path[:-1]
+        elif query:
+            query = dict([(k.endswith('_') and k[:-1] or k, v)
+                          for k, v in query.items()])
+        path = '/'.join([url_quote(x, self.charset) for x in path
+                         if x is not None]).lstrip('/')
+        rv = self.base
+        if path:
+            if not rv.endswith('/'):
+                rv += '/'
+            rv = urlparse.urljoin(rv, path)
+        if query:
+            rv += '?' + url_encode(query, self.charset, sort=self.sort,
+                                   key=self.key)
+        return str(rv)
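+
+
+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# `Href` objects compose: attribute access extends the base, positional
+# arguments become path segments and keyword arguments become the query
+# string, so URL helpers for an API can be built once and reused.
+def _href_sketch():
+    api = Href('/api', sort=True)
+    users = api.users
+    # users(42) == '/api/users/42'
+    # users(42, format='json') == '/api/users/42?format=json'
+    return users(42, format='json')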
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/useragents.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.useragents
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module provides a helper to inspect user agent strings.  It is far
+    from complete but should work for most of the currently available
+    browsers.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+
+
+class UserAgentParser(object):
+    """A simple user agent parser.  Used by the `UserAgent`."""
+
+    platforms = (
+        ('iphone', 'iphone'),
+        (r'darwin|mac|os\s*x', 'macos'),
+        ('win', 'windows'),
+        (r'android', 'android'),
+        (r'x11|lin(\b|ux)?', 'linux'),
+        ('(sun|i86)os', 'solaris'),
+        (r'nintendo\s+wii', 'wii'),
+        ('irix', 'irix'),
+        ('hp-?ux', 'hpux'),
+        ('aix', 'aix'),
+        ('sco|unix_sv', 'sco'),
+        ('bsd', 'bsd'),
+        ('amiga', 'amiga')
+    )
+    browsers = (
+        ('googlebot', 'google'),
+        ('msnbot', 'msn'),
+        ('yahoo', 'yahoo'),
+        ('ask jeeves', 'ask'),
+        (r'aol|america\s+online\s+browser', 'aol'),
+        ('opera', 'opera'),
+        ('chrome', 'chrome'),
+        ('firefox|firebird|phoenix|iceweasel', 'firefox'),
+        ('galeon', 'galeon'),
+        ('safari', 'safari'),
+        ('webkit', 'webkit'),
+        ('camino', 'camino'),
+        ('konqueror', 'konqueror'),
+        ('k-meleon', 'kmeleon'),
+        ('netscape', 'netscape'),
+        (r'msie|microsoft\s+internet\s+explorer', 'msie'),
+        ('lynx', 'lynx'),
+        ('links', 'links'),
+        ('seamonkey|mozilla', 'seamonkey')
+    )
+
+    _browser_version_re = r'(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?(?i)'
+    _language_re = re.compile(
+        r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|'
+        r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)'
+    )
+
+    def __init__(self):
+        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
+        self.browsers = [(b, re.compile(self._browser_version_re % a))
+                         for a, b in self.browsers]
+
+    def __call__(self, user_agent):
+        for platform, regex in self.platforms:
+            match = regex.search(user_agent)
+            if match is not None:
+                break
+        else:
+            platform = None
+        for browser, regex in self.browsers:
+            match = regex.search(user_agent)
+            if match is not None:
+                version = match.group(1)
+                break
+        else:
+            browser = version = None
+        match = self._language_re.search(user_agent)
+        if match is not None:
+            language = match.group(1) or match.group(2)
+        else:
+            language = None
+        return platform, browser, version, language
+
+
+class UserAgent(object):
+    """Represents a user agent.  Pass it a WSGI environment or a user agent
+    string and you can inspect some of the details from the user agent
+    string via the attributes.  The following attributes exist:
+
+    .. attribute:: string
+
+       the raw user agent string
+
+    .. attribute:: platform
+
+       the browser platform.  The following platforms are currently
+       recognized:
+
+       -   `aix`
+       -   `amiga`
+       -   `android`
+       -   `bsd`
+       -   `hpux`
+       -   `iphone`
+       -   `irix`
+       -   `linux`
+       -   `macos`
+       -   `sco`
+       -   `solaris`
+       -   `wii`
+       -   `windows`
+
+    .. attribute:: browser
+
+        the name of the browser.  The following browsers are currently
+        recognized:
+
+        -   `aol` *
+        -   `ask` *
+        -   `camino`
+        -   `chrome`
+        -   `firefox`
+        -   `galeon`
+        -   `google` *
+        -   `kmeleon`
+        -   `konqueror`
+        -   `links`
+        -   `lynx`
+        -   `msie`
+        -   `msn`
+        -   `netscape`
+        -   `opera`
+        -   `safari`
+        -   `seamonkey`
+        -   `webkit`
+        -   `yahoo` *
+
+        (Browsers marked with a star (``*``) are crawlers.)
+
+    .. attribute:: version
+
+        the version of the browser
+
+    .. attribute:: language
+
+        the language of the browser
+    """
+
+    _parser = UserAgentParser()
+
+    def __init__(self, environ_or_string):
+        if isinstance(environ_or_string, dict):
+            environ_or_string = environ_or_string.get('HTTP_USER_AGENT', '')
+        self.string = environ_or_string
+        self.platform, self.browser, self.version, self.language = \
+            self._parser(environ_or_string)
+
+    def to_header(self):
+        return self.string
+
+    def __str__(self):
+        return self.string
+
+    def __nonzero__(self):
+        return bool(self.browser)
+
+    def __repr__(self):
+        return '<%s %r/%s>' % (
+            self.__class__.__name__,
+            self.browser,
+            self.version
+        )
+
+
+# conceptionally this belongs in this module but because we want to lazily
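+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# Parsing a raw user agent string with the class above; a WSGI environ
+# containing an ``HTTP_USER_AGENT`` key works the same way.
+def _user_agent_sketch():
+    ua = UserAgent('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.16 '
+                   '(KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16')
+    # ua.platform == 'linux', ua.browser == 'chrome'
+    return ua.platform, ua.browser, ua.version
+
+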
+# load the user agent module (which happens in wrappers.py) we have to import
+# it afterwards.  The class itself has the module set to this module so
+# pickle, inspect and similar modules treat the object as if it was really
+# implemented here.
+from werkzeug.wrappers import UserAgentMixin
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/utils.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,681 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.utils
+    ~~~~~~~~~~~~~~
+
+    This module implements various utilities for WSGI applications.  Most of
+    them are used by the request and response wrappers, but they are also
+    useful on their own, especially for middleware development.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import os
+from time import time
+from datetime import datetime, timedelta
+
+from werkzeug._internal import _decode_unicode, \
+     _iter_modules, _ExtendedCookie, _ExtendedMorsel, \
+     _DictAccessorProperty, _dump_date, _parse_signature, _missing
+
+
+_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
+_entity_re = re.compile(r'&([^;]+);')
+_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
+_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
+                         'LPT2', 'LPT3', 'PRN', 'NUL')
+
+
+class cached_property(object):
+    """A decorator that converts a function into a lazy property.  The
+    function wrapped is called the first time to retrieve the result
+    and then that calculated result is used the next time you access
+    the value::
+
+        class Foo(object):
+
+            @cached_property
+            def foo(self):
+                # calculate something important here
+                return 42
+
+    The class has to have a `__dict__` in order for this property to
+    work.
+
+    .. versionchanged:: 0.6
+       The `writeable` attribute and parameter were deprecated.  Whether a
+       cached property is writeable or not now has to be documented.
+       For performance reasons the implementation does not honor the
+       writeable setting and will always make the property writeable.
+    """
+
+    # implementation detail: this property is implemented as non-data
+    # descriptor.  non-data descriptors are only invoked if there is
+    # no entry with the same name in the instance's __dict__.
+    # this allows us to completely get rid of the access function call
+    # overhead.  If one chooses to invoke __get__ by hand the property
+    # will still work as expected because the lookup logic is replicated
+    # in __get__ for manual invocation.
+
+    def __init__(self, func, name=None, doc=None, writeable=False):
+        if writeable:
+            from warnings import warn
+            warn(DeprecationWarning('the writeable argument to the '
+                                    'cached property is a noop since 0.6 '
+                                    'because the property is writeable '
+                                    'by default for performance reasons'))
+
+        self.__name__ = name or func.__name__
+        self.__module__ = func.__module__
+        self.__doc__ = doc or func.__doc__
+        self.func = func
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        value = obj.__dict__.get(self.__name__, _missing)
+        if value is _missing:
+            value = self.func(obj)
+            obj.__dict__[self.__name__] = value
+        return value
+
+
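+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# Because `cached_property` is a non-data descriptor the computed value ends
+# up in the instance ``__dict__``; deleting that entry invalidates the cache
+# so the next access recomputes it.
+class _CachedPropertySketch(object):
+
+    @cached_property
+    def answer(self):
+        # stand-in for an expensive computation
+        return 42
+
+# usage (not executed at import time):
+#     obj = _CachedPropertySketch()
+#     obj.answer                    # computed once, then cached in __dict__
+#     del obj.__dict__['answer']    # drop the cache; next access recomputes
+
+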
+class environ_property(_DictAccessorProperty):
+    """Maps request attributes to environment variables. This works not only
+    for the Werzeug request object, but also any other class with an
+    environ attribute:
+
+    >>> class Test(object):
+    ...     environ = {'key': 'value'}
+    ...     test = environ_property('key')
+    >>> var = Test()
+    >>> var.test
+    'value'
+
+    If you pass it a second value it's used as the default if the key does not
+    exist; the third one can be a converter that takes a value and converts
+    it.  If it raises :exc:`ValueError` or :exc:`TypeError` the default value
+    is used.  If no default value is provided `None` is used.
+
+    Per default the property is read only.  You have to explicitly make it
+    writable by passing ``read_only=False`` to the constructor.
+    """
+
+    read_only = True
+
+    def lookup(self, obj):
+        return obj.environ
+
+
+class header_property(_DictAccessorProperty):
+    """Like `environ_property` but for headers."""
+
+    def lookup(self, obj):
+        return obj.headers
+
+
+class HTMLBuilder(object):
+    """Helper object for HTML generation.
+
+    Per default there are two instances of that class: the `html` one and
+    the `xhtml` one, for those two dialects.  The class uses keyword parameters
+    and positional parameters to generate small snippets of HTML.
+
+    Keyword parameters are converted to XML/SGML attributes, positional
+    arguments are used as children.  Because Python accepts positional
+    arguments before keyword arguments it's a good idea to use a list with the
+    star-syntax for some children:
+
+    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
+    ...                        html.a('bar', href='bar.html')])
+    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
+
+    This class works around some browser limitations and cannot be used for
+    arbitrary SGML/XML generation.  For that purpose lxml and similar
+    libraries exist.
+
+    Calling the builder escapes the string passed:
+
+    >>> html.p(html("<foo>"))
+    u'<p>&lt;foo&gt;</p>'
+    """
+
+    from htmlentitydefs import name2codepoint
+    _entity_re = re.compile(r'&([^;]+);')
+    _entities = name2codepoint.copy()
+    _entities['apos'] = 39
+    _empty_elements = set([
+        'area', 'base', 'basefont', 'br', 'col', 'frame', 'hr', 'img',
+        'input', 'isindex', 'link', 'meta', 'param'
+    ])
+    _boolean_attributes = set([
+        'selected', 'checked', 'compact', 'declare', 'defer', 'disabled',
+        'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'
+    ])
+    _plaintext_elements = set(['textarea'])
+    _c_like_cdata = set(['script', 'style'])
+    del name2codepoint
+
+    def __init__(self, dialect):
+        self._dialect = dialect
+
+    def __call__(self, s):
+        return escape(s)
+
+    def __getattr__(self, tag):
+        if tag[:2] == '__':
+            raise AttributeError(tag)
+        def proxy(*children, **arguments):
+            buffer = ['<' + tag]
+            write = buffer.append
+            for key, value in arguments.iteritems():
+                if value is None:
+                    continue
+                if key.endswith('_'):
+                    key = key[:-1]
+                if key in self._boolean_attributes:
+                    if not value:
+                        continue
+                    value = self._dialect == 'xhtml' and '="%s"' % key or ''
+                else:
+                    value = '="%s"' % escape(value, True)
+                write(' ' + key + value)
+            if not children and tag in self._empty_elements:
+                write(self._dialect == 'xhtml' and ' />' or '>')
+                return ''.join(buffer)
+            write('>')
+            children_as_string = ''.join(unicode(x) for x in children
+                                         if x is not None)
+            if children_as_string:
+                if tag in self._plaintext_elements:
+                    children_as_string = escape(children_as_string)
+                elif tag in self._c_like_cdata and self._dialect == 'xhtml':
+                    children_as_string = '/*<![CDATA[*/%s/*]]>*/' % \
+                                         children_as_string
+            buffer.extend((children_as_string, '</%s>' % tag))
+            return ''.join(buffer)
+        return proxy
+
+    def __repr__(self):
+        return '<%s for %r>' % (
+            self.__class__.__name__,
+            self._dialect
+        )
+
+
+html = HTMLBuilder('html')
+xhtml = HTMLBuilder('xhtml')
+
+
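+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# The two prebuilt builders above differ in how empty elements and boolean
+# attributes are rendered.
+def _html_builder_sketch():
+    # html.br() == '<br>'                    xhtml.br() == '<br />'
+    # html.input(disabled=True) renders the bare attribute ``disabled``,
+    # xhtml.input(disabled=True) renders ``disabled="disabled"``.
+    return html.br(), xhtml.br()
+
+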
+def get_content_type(mimetype, charset):
+    """Return the full content type string with charset for a mimetype.
+
+    If the mimetype represents text the charset will be appended as charset
+    parameter, otherwise the mimetype is returned unchanged.
+
+    :param mimetype: the mimetype to be used as content type.
+    :param charset: the charset to be appended in case it was a text mimetype.
+    :return: the content type.
+    """
+    if mimetype.startswith('text/') or \
+       mimetype == 'application/xml' or \
+       (mimetype.startswith('application/') and
+        mimetype.endswith('+xml')):
+        mimetype += '; charset=' + charset
+    return mimetype
+
+
+def format_string(string, context):
+    """String-template format a string:
+
+    >>> format_string('$foo and ${foo}s', dict(foo=42))
+    '42 and 42s'
+
+    This does not do any attribute lookup etc.  For more advanced string
+    formatting have a look at the `werkzeug.templates` module.
+
+    :param string: the format string.
+    :param context: a dict with the variables to insert.
+    """
+    def lookup_arg(match):
+        x = context[match.group(1) or match.group(2)]
+        if not isinstance(x, basestring):
+            x = type(string)(x)
+        return x
+    return _format_re.sub(lookup_arg, string)
+
+
+def secure_filename(filename):
+    r"""Pass it a filename and it will return a secure version of it.  This
+    filename can then safely be stored on a regular file system and passed
+    to :func:`os.path.join`.  The filename returned is an ASCII only string
+    for maximum portability.
+
+    On Windows systems the function also makes sure that the file is not
+    named after one of the special device files.
+
+    >>> secure_filename("My cool movie.mov")
+    'My_cool_movie.mov'
+    >>> secure_filename("../../../etc/passwd")
+    'etc_passwd'
+    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
+    'i_contain_cool_umlauts.txt'
+
+    The function might return an empty filename.  It's your responsibility
+    to ensure that the filename is unique and that you generate a random
+    filename if the function returned an empty one.
+
+    .. versionadded:: 0.5
+
+    :param filename: the filename to secure
+    """
+    if isinstance(filename, unicode):
+        from unicodedata import normalize
+        filename = normalize('NFKD', filename).encode('ascii', 'ignore')
+    for sep in os.path.sep, os.path.altsep:
+        if sep:
+            filename = filename.replace(sep, ' ')
+    filename = str(_filename_ascii_strip_re.sub('', '_'.join(
+                   filename.split()))).strip('._')
+
+    # on nt a couple of special files are present in each folder.  We
+    # have to ensure that the target file is not such a filename.  In
+    # this case we prepend an underline
+    if os.name == 'nt' and filename and \
+       filename.split('.')[0].upper() in _windows_device_files:
+        filename = '_' + filename
+
+    return filename
+
+
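+# Editor's note: illustrative sketch only, not part of upstream Werkzeug.
+# The defensive pattern suggested above: fall back to a random name when
+# `secure_filename` strips the whole name away.  The folder path is just an
+# example value.
+def _store_upload_sketch(upload_name, data, folder='/tmp/uploads'):
+    filename = secure_filename(upload_name)
+    if not filename:
+        from uuid import uuid4
+        filename = uuid4().hex
+    target = os.path.join(folder, filename)
+    f = open(target, 'wb')
+    try:
+        f.write(data)
+    finally:
+        f.close()
+    return target
+
+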
+def escape(s, quote=False):
+    """Replace special characters "&", "<" and ">" to HTML-safe sequences.  If
+    the optional flag `quote` is `True`, the quotation mark character (") is
+    also translated.
+
+    There is a special handling for `None` which escapes to an empty string.
+
+    :param s: the string to escape.
+    :param quote: set to true to also escape double quotes.
+    """
+    if s is None:
+        return ''
+    elif hasattr(s, '__html__'):
+        return s.__html__()
+    elif not isinstance(s, basestring):
+        s = unicode(s)
+    s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
+    if quote:
+        s = s.replace('"', "&quot;")
+    return s
+
+
+def unescape(s):
+    """The reverse function of `escape`.  This unescapes all the HTML
+    entities, not only the XML entities inserted by `escape`.
+
+    :param s: the string to unescape.
+    """
+    def handle_match(m):
+        name = m.group(1)
+        if name in HTMLBuilder._entities:
+            return unichr(HTMLBuilder._entities[name])
+        try:
+            if name[:2] in ('#x', '#X'):
+                return unichr(int(name[2:], 16))
+            elif name.startswith('#'):
+                return unichr(int(name[1:]))
+        except ValueError:
+            pass
+        return u''
+    return _entity_re.sub(handle_match, s)
+
+
+def cookie_date(expires=None):
+    """Formats the time to ensure compatibility with Netscape's cookie
+    standard.
+
+    Accepts a floating point number expressed in seconds since the epoch, a
+    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
+    function can be used to parse such a date.
+
+    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
+
+    :param expires: If provided that date is used, otherwise the current.
+    """
+    return _dump_date(expires, '-')
+
+
+def parse_cookie(header, charset='utf-8', errors='ignore',
+                 cls=None):
+    """Parse a cookie.  Either from a string or WSGI environ.
+
+    Per default encoding errors are ignored.  If you want a different behavior
+    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
+    :exc:`HTTPUnicodeError` is raised.
+
+    .. versionchanged:: 0.5
+       This function now returns a :class:`TypeConversionDict` instead of a
+       regular dict.  The `cls` parameter was added.
+
+    :param header: the header to be used to parse the cookie.  Alternatively
+                   this can be a WSGI environment.
+    :param charset: the charset for the cookie values.
+    :param errors: the error behavior for the charset decoding.
+    :param cls: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`TypeConversionDict` is
+                       used.
+    """
+    if isinstance(header, dict):
+        header = header.get('HTTP_COOKIE', '')
+    if cls is None:
+        cls = TypeConversionDict
+    cookie = _ExtendedCookie()
+    cookie.load(header)
+    result = {}
+
+    # decode to unicode and skip broken items.  Our extended morsel
+    # and extended cookie will catch CookieErrors and convert them to
+    # `None` items which we have to skip here.
+    for key, value in cookie.iteritems():
+        if value.value is not None:
+            result[key] = _decode_unicode(unquote_header_value(value.value),
+                                          charset, errors)
+
+    return cls(result)
+
+
+def dump_cookie(key, value='', max_age=None, expires=None, path='/',
+                domain=None, secure=None, httponly=False, charset='utf-8',
+                sync_expires=True):
+    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
+    The parameters are the same as in the cookie Morsel object in the
+    Python standard library but it accepts unicode data, too.
+
+    :param max_age: should be a number of seconds, or `None` (default) if
+                    the cookie should last only as long as the client's
+                    browser session.  Additionally `timedelta` objects
+                    are accepted, too.
+    :param expires: should be a `datetime` object or unix timestamp.
+    :param path: limits the cookie to a given path, per default it will
+                 span the whole domain.
+    :param domain: Use this if you want to set a cross-domain cookie. For
+                   example, ``domain=".example.com"`` will set a cookie
+                   that is readable by the domain ``www.example.com``,
+                   ``foo.example.com`` etc. Otherwise, a cookie will only
+                   be readable by the domain that set it.
+    :param secure: The cookie will only be available via HTTPS
+    :param httponly: disallow JavaScript access to the cookie.  This is an
+                     extension to the cookie standard and probably not
+                     supported by all browsers.
+    :param charset: the encoding for unicode values.
+    :param sync_expires: automatically set expires if max_age is defined
+                         but expires not.
+    """
+    try:
+        key = str(key)
+    except UnicodeError:
+        raise TypeError('invalid key %r' % key)
+    if isinstance(value, unicode):
+        value = value.encode(charset)
+    value = quote_header_value(value)
+    morsel = _ExtendedMorsel(key, value)
+    if isinstance(max_age, timedelta):
+        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
+    if expires is not None:
+        if not isinstance(expires, basestring):
+            expires = cookie_date(expires)
+        morsel['expires'] = expires
+    elif max_age is not None and sync_expires:
+        morsel['expires'] = cookie_date(time() + max_age)
+    for k, v in (('path', path), ('domain', domain), ('secure', secure),
+                 ('max-age', max_age), ('httponly', httponly)):
+        if v is not None and v is not False:
+            morsel[k] = str(v)
+    return morsel.output(header='').lstrip()
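+
+
+def _dump_cookie_example():
+    # Hypothetical helper sketching dump_cookie usage; the cookie name and
+    # value are made up.  The result is the value part of a Set-Cookie
+    # header, roughly 'session=abc123; expires=...; Max-Age=3600; Path=/'.
+    return dump_cookie('session', u'abc123', max_age=3600, path='/',
+                       httponly=True)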
+
+
+def http_date(timestamp=None):
+    """Formats the time to match the RFC1123 date format.
+
+    Accepts a floating point number expressed in seconds since the epoch, a
+    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
+    function can be used to parse such a date.
+
+    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
+
+    :param timestamp: If provided, that date is used, otherwise the current time.
+    """
+    return _dump_date(timestamp, ' ')
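+
+
+def _http_date_example():
+    # Hypothetical helper sketching http_date usage: no argument formats the
+    # current time, a timestamp or datetime formats that instant instead.
+    from time import time
+    return http_date(), http_date(time() - 60)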
+
+
+def redirect(location, code=302):
+    """Return a response object (a WSGI application) that, if called,
+    redirects the client to the target location.  Supported codes are 301,
+    302, 303, 305, and 307.  300 is not supported because it's not a real
+    redirect and 304 because it's the answer for a request with a request
+    with defined If-Modified-Since headers.
+
+    .. versionadded:: 0.6
+       The location can now be a unicode string that is encoded using
+       the :func:`iri_to_uri` function.
+
+    :param location: the location the response should redirect to.
+    :param code: the redirect status code.
+    """
+    assert code in (301, 302, 303, 305, 307), 'invalid code'
+    from werkzeug.wrappers import BaseResponse
+    display_location = location
+    if isinstance(location, unicode):
+        from werkzeug.urls import iri_to_uri
+        location = iri_to_uri(location)
+    response = BaseResponse(
+        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
+        '<title>Redirecting...</title>\n'
+        '<h1>Redirecting...</h1>\n'
+        '<p>You should be redirected automatically to target URL: '
+        '<a href="%s">%s</a>.  If not click the link.' %
+        (location, display_location), code, mimetype='text/html')
+    response.headers['Location'] = location
+    return response
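+
+
+def _redirect_example():
+    # Hypothetical helper sketching redirect usage; the target URL is made
+    # up.  The returned BaseResponse can be returned from a view or called
+    # directly as a WSGI application.
+    response = redirect(u'http://example.com/new-location', code=301)
+    return response.status_code, response.headers['Location']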
+
+
+def append_slash_redirect(environ, code=301):
+    """Redirect to the same URL but with a slash appended.  The behavior
+    of this function is undefined if the path ends with a slash already.
+
+    :param environ: the WSGI environment for the request that triggers
+                    the redirect.
+    :param code: the status code for the redirect.
+    """
+    new_path = environ['PATH_INFO'].strip('/') + '/'
+    query_string = environ.get('QUERY_STRING')
+    if query_string:
+        new_path += '?' + query_string
+    return redirect(new_path, code)
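+
+
+def _append_slash_redirect_example():
+    # Hypothetical helper sketching append_slash_redirect; the environ values
+    # are made up.  The Location header stays relative here and is joined
+    # with the request URL once the response is started.
+    environ = {'PATH_INFO': '/docs', 'QUERY_STRING': 'page=2'}
+    return append_slash_redirect(environ).headers['Location']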
+
+
+def import_string(import_name, silent=False):
+    """Imports an object based on a string.  This is useful if you want to
+    use import paths as endpoints or something similar.  An import path can
+    be specified either in dotted notation (``xml.sax.saxutils.escape``)
+    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+    If `silent` is True the return value will be `None` if the import fails.
+
+    :param import_name: the dotted name for the object to import.
+    :param silent: if set to `True` import errors are ignored and
+                   `None` is returned instead.
+    :return: imported object
+    """
+    # force the import name to automatically convert to strings
+    if isinstance(import_name, unicode):
+        import_name = str(import_name)
+    try:
+        if ':' in import_name:
+            module, obj = import_name.split(':', 1)
+        elif '.' in import_name:
+            module, obj = import_name.rsplit('.', 1)
+        else:
+            return __import__(import_name)
+        # __import__ is not able to handle unicode strings in the fromlist
+        # if the module is a package
+        if isinstance(obj, unicode):
+            obj = obj.encode('utf-8')
+        try:
+            return getattr(__import__(module, None, None, [obj]), obj)
+        except (ImportError, AttributeError):
+            # support importing modules not yet set up by the parent module
+            # (or package for that matter)
+            modname = module + '.' + obj
+            __import__(modname)
+            return sys.modules[modname]
+    except ImportError:
+        if not silent:
+            raise
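+
+
+def _import_string_example():
+    # Hypothetical helper sketching import_string with both notations from
+    # the docstring above, plus the `silent` fallback for a missing module.
+    dotted = import_string('xml.sax.saxutils.escape')
+    coloned = import_string('xml.sax.saxutils:escape')
+    missing = import_string('not.a.real.module', silent=True)   # -> None
+    return dotted is coloned, missing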
+
+
+def find_modules(import_path, include_packages=False, recursive=False):
+    """Find all the modules below a package.  This can be useful to
+    automatically import all views / controllers so that their metaclasses /
+    function decorators have a chance to register themselves on the
+    application.
+
+    Packages are not returned unless `include_packages` is `True`.  This can
+    also recursively list modules but in that case it will import all the
+    packages to get the correct load path of that module.
+
+    :param import_path: the dotted name for the package to find child modules.
+    :param include_packages: set to `True` if packages should be returned, too.
+    :param recursive: set to `True` if recursion should happen.
+    :return: generator
+    """
+    module = import_string(import_path)
+    path = getattr(module, '__path__', None)
+    if path is None:
+        raise ValueError('%r is not a package' % import_path)
+    basename = module.__name__ + '.'
+    for modname, ispkg in _iter_modules(path):
+        modname = basename + modname
+        if ispkg:
+            if include_packages:
+                yield modname
+            if recursive:
+                for item in find_modules(modname, include_packages, True):
+                    yield item
+        else:
+            yield modname
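+
+
+def _find_modules_example():
+    # Hypothetical helper sketching find_modules: list the modules below the
+    # bundled werkzeug.contrib package.
+    return list(find_modules('werkzeug.contrib'))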
+
+
+def validate_arguments(func, args, kwargs, drop_extra=True):
+    """Check if the function accepts the arguments and keyword arguments.
+    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
+    the function without causing a `TypeError` because the function signature
+    is incompatible.  If `drop_extra` is set to `True` (which is the default)
+    any extra positional or keyword arguments are dropped automatically.
+
+    The exception raised provides three attributes:
+
+    `missing`
+        A set of argument names that the function expected but were
+        missing.
+
+    `extra`
+        A dict of keyword arguments that the function cannot handle but
+        were provided.
+
+    `extra_positional`
+        A list of values that were given as positional arguments but that
+        the function cannot accept.
+
+    This can be useful for decorators that forward user submitted data to
+    a view function::
+
+        from werkzeug import ArgumentValidationError, validate_arguments
+
+        def sanitize(f):
+            def proxy(request):
+                data = request.values.to_dict()
+                try:
+                    args, kwargs = validate_arguments(f, (request,), data)
+                except ArgumentValidationError:
+                    raise BadRequest('The browser failed to transmit all '
+                                     'the data expected.')
+                return f(*args, **kwargs)
+            return proxy
+
+    :param func: the function the validation is performed against.
+    :param args: a tuple of positional arguments.
+    :param kwargs: a dict of keyword arguments.
+    :param drop_extra: set to `False` if you don't want extra arguments
+                       to be silently dropped.
+    :return: tuple in the form ``(args, kwargs)``.
+    """
+    parser = _parse_signature(func)
+    args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
+    if missing:
+        raise ArgumentValidationError(tuple(missing))
+    elif (extra or extra_positional) and not drop_extra:
+        raise ArgumentValidationError(None, extra, extra_positional)
+    return tuple(args), kwargs
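+
+
+def _validate_arguments_example():
+    # Hypothetical helper sketching a direct validate_arguments call; the
+    # view function and data are made up.  The surplus 'unknown' keyword is
+    # silently dropped because drop_extra defaults to True.
+    def view(request, page=1):
+        return page
+    args, kwargs = validate_arguments(view, ('fake request',),
+                                      {'page': 2, 'unknown': 'x'})
+    return view(*args, **kwargs)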
+
+
+def bind_arguments(func, args, kwargs):
+    """Bind the arguments provided into a dict.  When passed a function,
+    a tuple of arguments and a dict of keyword arguments `bind_arguments`
+    returns a dict of names as the function would see it.  This can be useful
+    to implement a cache decorator that uses the function arguments to build
+    the cache key based on the values of the arguments.
+
+    :param func: the function the arguments should be bound for.
+    :param args: tuple of positional arguments.
+    :param kwargs: a dict of keyword arguments.
+    :return: a :class:`dict` of bound keyword arguments.
+    """
+    args, kwargs, missing, extra, extra_positional, \
+        arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
+    values = {}
+    for (name, has_default, default), value in zip(arg_spec, args):
+        values[name] = value
+    if vararg_var is not None:
+        values[vararg_var] = tuple(extra_positional)
+    elif extra_positional:
+        raise TypeError('too many positional arguments')
+    if kwarg_var is not None:
+        multikw = set(extra) & set([x[0] for x in arg_spec])
+        if multikw:
+            raise TypeError('got multiple values for keyword argument ' +
+                            repr(iter(multikw).next()))
+        values[kwarg_var] = extra
+    elif extra:
+        raise TypeError('got unexpected keyword argument ' +
+                        repr(iter(extra).next()))
+    return values
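+
+
+def _bind_arguments_example():
+    # Hypothetical helper sketching bind_arguments: map call arguments back
+    # to parameter names, e.g. to build a cache key.  Expected result here
+    # would be {'a': 1, 'b': 2}.
+    def add(a, b=0):
+        return a + b
+    return bind_arguments(add, (1,), {'b': 2})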
+
+
+class ArgumentValidationError(ValueError):
+    """Raised if :func:`validate_arguments` fails to validate"""
+
+    def __init__(self, missing=None, extra=None, extra_positional=None):
+        self.missing = set(missing or ())
+        self.extra = extra or {}
+        self.extra_positional = extra_positional or []
+        ValueError.__init__(self, 'function arguments invalid.  ('
+                            '%d missing, %d additional)' % (
+            len(self.missing),
+            len(self.extra) + len(self.extra_positional)
+        ))
+
+
+# circular dependencies
+from werkzeug.http import quote_header_value, unquote_header_value
+from werkzeug.exceptions import BadRequest
+from werkzeug.datastructures import TypeConversionDict
+
+
+# DEPRECATED
+# these objects were previously in this module as well.  we import
+# them here for backwards compatibility with old pickles.
+from werkzeug.datastructures import MultiDict, CombinedMultiDict, \
+     Headers, EnvironHeaders
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/wrappers.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,1495 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.wrappers
+    ~~~~~~~~~~~~~~~~~
+
+    The wrappers are simple request and response objects which you can
+    subclass to do whatever you want them to do.  The request object contains
+    the information transmitted by the client (webbrowser) and the response
+    object contains all the information sent back to the browser.
+
+    An important detail is that the request object is created with the WSGI
+    environ and will act as high-level proxy whereas the response object is an
+    actual WSGI application.
+
+    Like everything else in Werkzeug these objects will work correctly with
+    unicode data.  Incoming form data parsed by the request object will be
+    decoded into a unicode object if possible and if it makes sense.
+
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import tempfile
+import urlparse
+from datetime import datetime, timedelta
+
+from werkzeug.http import HTTP_STATUS_CODES, \
+     parse_accept_header, parse_cache_control_header, parse_etags, \
+     parse_date, generate_etag, is_resource_modified, unquote_etag, \
+     quote_etag, parse_set_header, parse_authorization_header, \
+     parse_www_authenticate_header, remove_entity_headers, \
+     parse_options_header, dump_options_header
+from werkzeug.urls import url_decode, iri_to_uri
+from werkzeug.formparser import parse_form_data, default_stream_factory
+from werkzeug.utils import cached_property, environ_property, \
+     cookie_date, parse_cookie, dump_cookie, http_date, escape, \
+     header_property, get_content_type
+from werkzeug.wsgi import get_current_url, get_host, LimitedStream, \
+     ClosingIterator
+from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
+     EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
+     ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
+     ResponseCacheControl, RequestCacheControl, CallbackDict
+from werkzeug._internal import _empty_stream, _decode_unicode, \
+     _patch_wrapper, _get_environ
+
+
+def _run_wsgi_app(*args):
+    """This function replaces itself to ensure that the test module is not
+    imported unless required.  DO NOT USE!
+    """
+    global _run_wsgi_app
+    from werkzeug.test import run_wsgi_app as _run_wsgi_app
+    return _run_wsgi_app(*args)
+
+
+def _warn_if_string(iterable):
+    """Helper for the response objects to check if the iterable returned
+    to the WSGI server is not a string.
+    """
+    if isinstance(iterable, basestring):
+        from warnings import warn
+        warn(Warning('response iterable was set to a string.  This appears '
+                     'to work but means that the server will send the '
+                     'data to the client char by char.  This is almost '
+                     'never intended behavior, use response.data to assign '
+                     'strings to the response object.'), stacklevel=2)
+
+
+class BaseRequest(object):
+    """Very basic request object.  This does not implement advanced stuff like
+    entity tag parsing or cache controls.  The request object is created with
+    the WSGI environment as first argument and will add itself to the WSGI
+    environment as ``'werkzeug.request'`` unless it's created with
+    `populate_request` set to False.
+
+    There are a couple of mixins available that add additional functionality
+    to the request object; there is also a class called `Request` which
+    subclasses `BaseRequest` and all the important mixins.
+
+    It's a good idea to create a custom subclass of the :class:`BaseRequest`
+    and add missing functionality either via mixins or direct implementation.
+    Here is an example of such a subclass::
+
+        from werkzeug import BaseRequest, ETagRequestMixin
+
+        class Request(BaseRequest, ETagRequestMixin):
+            pass
+
+    Request objects are **read only**.  As of 0.5 modifications are not
+    allowed in any place.  Unlike the lower level parsing functions the
+    request object will use immutable objects everywhere possible.
+
+    Per default the request object will assume all the text data is `utf-8`
+    encoded.  Please refer to `the unicode chapter <unicode.txt>`_ for more
+    details about customizing the behavior.
+
+    Per default the request object will be added to the WSGI
+    environment as `werkzeug.request` to support the debugging system.
+    If you don't want that, set `populate_request` to `False`.
+
+    If `shallow` is `True` the environment is initialized as shallow
+    object around the environ.  Every operation that would modify the
+    environ in any way (such as consuming form data) raises an exception
+    unless the `shallow` attribute is explicitly set to `False`.  This
+    is useful for middlewares where you don't want to consume the form
+    data by accident.  A shallow request is not populated to the WSGI
+    environment.
+
+    .. versionchanged:: 0.5
+       read-only mode was enforced by using immutable classes for all
+       data.
+    """
+
+    #: the charset for the request, defaults to utf-8
+    charset = 'utf-8'
+
+    #: the error handling procedure for errors, defaults to 'ignore'
+    encoding_errors = 'ignore'
+
+    #: set to True if the application runs behind an HTTP proxy
+    is_behind_proxy = False
+
+    #: the maximum content length.  This is forwarded to the form data
+    #: parsing function (:func:`parse_form_data`).  When set and the
+    #: :attr:`form` or :attr:`files` attribute is accessed and the
+    #: parsing fails because more than the specified value is transmitted
+    #: a :exc:`~exceptions.RequestEntityTooLarge` exception is raised.
+    #:
+    #: Have a look at :ref:`dealing-with-request-data` for more details.
+    #:
+    #: .. versionadded:: 0.5
+    max_content_length = None
+
+    #: the maximum form field size.  This is forwarded to the form data
+    #: parsing function (:func:`parse_form_data`).  When set and the
+    #: :attr:`form` or :attr:`files` attribute is accessed and the
+    #: data in memory for post data is longer than the specified value a
+    #: :exc:`~exceptions.RequestEntityTooLarge` exception is raised.
+    #:
+    #: Have a look at :ref:`dealing-with-request-data` for more details.
+    #:
+    #: .. versionadded:: 0.5
+    max_form_memory_size = None
+
+    #: the class to use for `args` and `form`.  The default is an
+    #: :class:`ImmutableMultiDict` which supports multiple values per key.
+    #: Alternatively it makes sense to use an :class:`ImmutableOrderedMultiDict`
+    #: which preserves order or an :class:`ImmutableDict` which is
+    #: the fastest but only remembers the last key.  It is also possible
+    #: to use mutable structures, but this is not recommended.
+    #:
+    #: .. versionadded:: 0.6
+    parameter_storage_class = ImmutableMultiDict
+
+    #: the type to be used for list values from the incoming WSGI
+    #: environment.  By default an :class:`ImmutableList` is used
+    #: (for example for :attr:`access_list`).
+    #:
+    #: .. versionadded:: 0.6
+    list_storage_class = ImmutableList
+
+    #: the type to be used for dict values from the incoming WSGI
+    #: environment.  By default an :class:`ImmutableTypeConversionDict`
+    #: is used (for example for :attr:`cookies`).
+    #:
+    #: .. versionadded:: 0.6
+    dict_storage_class = ImmutableTypeConversionDict
+
+    def __init__(self, environ, populate_request=True, shallow=False):
+        self.environ = environ
+        if populate_request and not shallow:
+            self.environ['werkzeug.request'] = self
+        self.shallow = shallow
+
+    def __repr__(self):
+        # make sure the __repr__ even works if the request was created
+        # from an invalid WSGI environment.  If we display the request
+        # in a debug session we don't want the repr to blow up.
+        args = []
+        try:
+            args.append("'%s'" % self.url)
+            args.append('[%s]' % self.method)
+        except:
+            args.append('(invalid WSGI environ)')
+
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            ' '.join(args)
+        )
+
+    @property
+    def url_charset(self):
+        """The charset that is assumed for URLs.  Defaults to the value
+        of :attr:`charset`.
+
+        .. versionadded:: 0.6
+        """
+        return self.charset
+
+    @classmethod
+    def from_values(cls, *args, **kwargs):
+        """Create a new request object based on the values provided.  If
+        environ is given missing values are filled from there.  This method is
+        useful for small scripts when you need to simulate a request from an URL.
+        Do not use this method for unittesting, there is a full featured client
+        object (:class:`Client`) that allows to create multipart requests,
+        support for cookies etc.
+
+        This accepts the same options as the :class:`EnvironBuilder`.
+
+        .. versionchanged:: 0.5
+           This method now accepts the same arguments as
+           :class:`EnvironBuilder`.  Because of this the `environ` parameter
+           is now called `environ_overrides`.
+
+        :return: request object
+        """
+        from werkzeug.test import EnvironBuilder
+        charset = kwargs.pop('charset', cls.charset)
+        builder = EnvironBuilder(*args, **kwargs)
+        try:
+            return builder.get_request(cls)
+        finally:
+            builder.close()
+
+    @classmethod
+    def application(cls, f):
+        """Decorate a function as responder that accepts the request as first
+        argument.  This works like the :func:`responder` decorator but the
+        function is passed the request object as first argument::
+
+            @Request.application
+            def my_wsgi_app(request):
+                return Response('Hello World!')
+
+        :param f: the WSGI callable to decorate
+        :return: a new WSGI callable
+        """
+        #: return a callable that wraps the -2nd argument with the request
+        #: and calls the function with all the arguments up to that one and
+        #: the request.  The return value is then called with the latest
+        #: two arguments.  This makes it possible to use this decorator for
+        #: both methods and standalone WSGI functions.
+        return _patch_wrapper(f, lambda *a: f(*a[:-2]+(cls(a[-2]),))(*a[-2:]))
+
+    def _get_file_stream(self, total_content_length, content_type, filename=None,
+                         content_length=None):
+        """Called to get a stream for the file upload.
+
+        This must provide a file-like class with `read()`, `readline()`
+        and `seek()` methods that is both writeable and readable.
+
+        The default implementation returns a temporary file if the total
+        content length is higher than 500KB.  Because many browsers do not
+        provide a content length for the files, only the total content
+        length matters.
+
+        .. versionchanged:: 0.5
+           Previously this function was not passed any arguments.  In 0.5 older
+           functions not accepting any arguments are still supported for
+           backwards compatibility.
+
+        :param total_content_length: the total content length of all the
+                                     data in the request combined.  This value
+                                     is guaranteed to be there.
+        :param content_type: the mimetype of the uploaded file.
+        :param filename: the filename of the uploaded file.  May be `None`.
+        :param content_length: the length of this file.  This value is usually
+                               not provided because web browsers do not send
+                               it.
+        """
+        return default_stream_factory(total_content_length, content_type,
+                                      filename, content_length)
+
+    def _load_form_data(self):
+        """Method used internally to retrieve submitted data.  After calling
+        this sets `form` and `files` on the request object to multi dicts
+        filled with the incoming form data.  As a matter of fact the input
+        stream will be empty afterwards.
+
+        :internal:
+        """
+        # abort early if we have already consumed the stream
+        if 'stream' in self.__dict__:
+            return
+        if self.shallow:
+            raise RuntimeError('A shallow request tried to consume '
+                               'form data.  If you really want to do '
+                               'that, set `shallow` to False.')
+        data = None
+        stream = _empty_stream
+        if self.environ['REQUEST_METHOD'] in ('POST', 'PUT'):
+            try:
+                data = parse_form_data(self.environ, self._get_file_stream,
+                                       self.charset, self.encoding_errors,
+                                       self.max_form_memory_size,
+                                       self.max_content_length,
+                                       cls=self.parameter_storage_class,
+                                       silent=False)
+            except ValueError, e:
+                self._form_parsing_failed(e)
+        else:
+            # if we have a content length header we are able to properly
+            # guard the incoming stream, no matter what request method is
+            # used.
+            content_length = self.headers.get('content-length', type=int)
+            if content_length is not None:
+                stream = LimitedStream(self.environ['wsgi.input'],
+                                       content_length)
+
+        if data is None:
+            data = (stream, self.parameter_storage_class(),
+                    self.parameter_storage_class())
+
+        # inject the values into the instance dict so that we bypass
+        # our cached_property non-data descriptor.
+        d = self.__dict__
+        d['stream'], d['form'], d['files'] = data
+
+    def _form_parsing_failed(self, error):
+        """Called if parsing of form data failed.  This is currently only
+        invoked for failed multipart uploads.  By default this method does
+        nothing.
+
+        :param error: a `ValueError` object with a message why the
+                      parsing failed.
+
+        .. versionadded:: 0.5.1
+        """
+
+    @cached_property
+    def stream(self):
+        """The parsed stream if the submitted data was not multipart or
+        urlencoded form data.  This stream is the stream left by the form data
+        parser module after parsing.  This is *not* the WSGI input stream but
+        a wrapper around it that ensures the caller does not accidentally
+        read past `Content-Length`.
+        """
+        self._load_form_data()
+        return self.stream
+
+    input_stream = environ_property('wsgi.input', 'The WSGI input stream.\n'
+        'In general it\'s a bad idea to use this one because you can easily '
+        'read past the boundary.  Use the :attr:`stream` instead.')
+
+    @cached_property
+    def args(self):
+        """The parsed URL parameters.  By default a :class:`ImmutableMultiDict`
+        is returned from this function.  This can be changed by setting
+        :attr:`parameter_storage_class` to a different type.  This might
+        be necessary if the order of the form data is important.
+        """
+        return url_decode(self.environ.get('QUERY_STRING', ''),
+                          self.url_charset, errors=self.encoding_errors,
+                          cls=self.parameter_storage_class)
+
+    @cached_property
+    def data(self):
+        """This reads the buffered incoming data from the client into the
+        string.  Usually it's a bad idea to access :attr:`data` because a client
+        could send dozens of megabytes or more to cause memory problems on the
+        server.
+
+        To circumvent that make sure to check the content length first.
+        """
+        return self.stream.read()
+
+    @cached_property
+    def form(self):
+        """The form parameters.  By default a :class:`ImmutableMultiDict`
+        is returned from this function.  This can be changed by setting
+        :attr:`parameter_storage_class` to a different type.  This might
+        be necessary if the order of the form data is important.
+        """
+        self._load_form_data()
+        return self.form
+
+    @cached_property
+    def values(self):
+        """Combined multi dict for :attr:`args` and :attr:`form`."""
+        args = []
+        for d in self.args, self.form:
+            if not isinstance(d, MultiDict):
+                d = MultiDict(d)
+            args.append(d)
+        return CombinedMultiDict(args)
+
+    @cached_property
+    def files(self):
+        """:class:`MultiDict` object containing all uploaded files.  Each key in
+        :attr:`files` is the name from the ``<input type="file" name="">``.  Each
+        value in :attr:`files` is a Werkzeug :class:`FileStorage` object.
+
+        Note that :attr:`files` will only contain data if the request method was
+        POST or PUT and the ``<form>`` that posted to the request had
+        ``enctype="multipart/form-data"``.  It will be empty otherwise.
+
+        See the :class:`MultiDict` / :class:`FileStorage` documentation for more
+        details about the used data structure.
+        """
+        self._load_form_data()
+        return self.files
+
+    @cached_property
+    def cookies(self):
+        """Read only access to the retrieved cookie values as dictionary."""
+        return parse_cookie(self.environ, self.charset,
+                            cls=self.dict_storage_class)
+
+    @cached_property
+    def headers(self):
+        """The headers from the WSGI environ as immutable
+        :class:`EnvironHeaders`.
+        """
+        return EnvironHeaders(self.environ)
+
+    @cached_property
+    def path(self):
+        """Requested path as unicode.  This works a bit like the regular path
+        info in the WSGI environment but will always include a leading slash,
+        even if the URL root is accessed.
+        """
+        path = '/' + (self.environ.get('PATH_INFO') or '').lstrip('/')
+        return _decode_unicode(path, self.url_charset, self.encoding_errors)
+
+    @cached_property
+    def script_root(self):
+        """The root path of the script without the trailing slash."""
+        path = (self.environ.get('SCRIPT_NAME') or '').rstrip('/')
+        return _decode_unicode(path, self.url_charset, self.encoding_errors)
+
+    @cached_property
+    def url(self):
+        """The reconstructed current URL"""
+        return get_current_url(self.environ)
+
+    @cached_property
+    def base_url(self):
+        """Like :attr:`url` but without the querystring"""
+        return get_current_url(self.environ, strip_querystring=True)
+
+    @cached_property
+    def url_root(self):
+        """The full URL root (with hostname), this is the application root."""
+        return get_current_url(self.environ, True)
+
+    @cached_property
+    def host_url(self):
+        """Just the host with scheme."""
+        return get_current_url(self.environ, host_only=True)
+
+    @cached_property
+    def host(self):
+        """Just the host including the port if available."""
+        return get_host(self.environ)
+
+    query_string = environ_property('QUERY_STRING', '', read_only=True, doc=
+        '''The URL parameters as raw bytestring.''')
+    method = environ_property('REQUEST_METHOD', 'GET', read_only=True, doc=
+        '''The transmission method. (For example ``'GET'`` or ``'POST'``).''')
+
+    @cached_property
+    def access_route(self):
+        """If a forwarded header exists this is a list of all ip addresses
+        from the client ip to the last proxy server.
+        """
+        if 'HTTP_X_FORWARDED_FOR' in self.environ:
+            addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
+            return self.list_storage_class([x.strip() for x in addr])
+        elif 'REMOTE_ADDR' in self.environ:
+            return self.list_storage_class([self.environ['REMOTE_ADDR']])
+        return self.list_storage_class()
+
+    @property
+    def remote_addr(self):
+        """The remote address of the client."""
+        if self.is_behind_proxy and self.access_route:
+            return self.access_route[0]
+        return self.environ.get('REMOTE_ADDR')
+
+    remote_user = environ_property('REMOTE_USER', doc='''
+        If the server supports user authentication, and the script is
+        protected, this attribute contains the username the user has
+        authenticated as.''')
+
+    is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
+                      .lower() == 'xmlhttprequest', doc='''
+        True if the request was triggered via a JavaScript XMLHttpRequest.
+        This only works with libraries that support the `X-Requested-With`
+        header and set it to "XMLHttpRequest".  Libraries that do that are
+        prototype, jQuery and Mochikit and probably some more.''')
+    is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
+                         doc='`True` if the request is secure.')
+    is_multithread = environ_property('wsgi.multithread', doc='''
+        boolean that is `True` if the application is served by
+        a multithreaded WSGI server.''')
+    is_multiprocess = environ_property('wsgi.multiprocess', doc='''
+        boolean that is `True` if the application is served by
+        a WSGI server that spawns multiple processes.''')
+    is_run_once = environ_property('wsgi.run_once', doc='''
+        boolean that is `True` if the application will be executed only
+        once in a process lifetime.  This is the case for CGI for example,
+        but it's not guaranteed that the execution only happens one time.''')
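+
+
+def _base_request_example():
+    # Hypothetical helper sketching BaseRequest.from_values; the URL is made
+    # up and EnvironBuilder (see werkzeug.test) is assumed to split the query
+    # string off the given path.
+    request = BaseRequest.from_values('/search?q=werkzeug')
+    return request.method, request.path, request.args.get('q')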
+
+
+class BaseResponse(object):
+    """Base response class.  The most important fact about a response object
+    is that it's a regular WSGI application.  It's initialized with a couple
+    of response parameters (headers, body, status code etc.) and will start a
+    valid WSGI response when called with the environ and start response
+    callable.
+
+    Because it's a WSGI application itself, processing usually ends before the
+    actual response is sent to the server.  This helps debugging systems
+    because they can catch all the exceptions before responses are started.
+
+    Here is a small example WSGI application that takes advantage of the
+    response objects::
+
+        from werkzeug import BaseResponse as Response
+
+        def index():
+            return Response('Index page')
+
+        def application(environ, start_response):
+            path = environ.get('PATH_INFO') or '/'
+            if path == '/':
+                response = index()
+            else:
+                response = Response('Not Found', status=404)
+            return response(environ, start_response)
+
+    Like :class:`BaseRequest`, this object lacks a lot of functionality that is
+    implemented in mixins.  This gives you better control over the actual
+    API of your response objects, so you can create subclasses and add custom
+    functionality.  A full featured response object is available as
+    :class:`Response` which implements a couple of useful mixins.
+
+    To enforce a new type of already existing responses you can use the
+    :meth:`force_type` method.  This is useful if you're working with different
+    subclasses of response objects and you want to post-process them with a
+    known interface.
+
+    Per default the response object will assume all the text data is `utf-8`
+    encoded.  Please refer to `the unicode chapter <unicode.txt>`_ for more
+    details about customizing the behavior.
+
+    Response can be any kind of iterable or string.  If it's a string
+    it's considered to be an iterable with one item which is the string
+    passed.  Headers can be a list of tuples or a :class:`Headers` object.
+
+    Special note for `mimetype` and `content_type`:  For most mime types
+    `mimetype` and `content_type` work the same, the difference affects
+    only 'text' mimetypes.  If a mimetype passed with `mimetype` starts
+    with `text/`, the charset of the response object is appended to it as
+    a charset parameter.  In contrast the `content_type` parameter is
+    always added as a header unmodified.
+
+    .. versionchanged:: 0.5
+       the `direct_passthrough` parameter was added.
+
+    :param response: a string or response iterable.
+    :param status: a string with a status or an integer with the status code.
+    :param headers: a list of headers or an :class:`Headers` object.
+    :param mimetype: the mimetype for the response.  See notice above.
+    :param content_type: the content type for the response.  See notice above.
+    :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
+                               called before iteration which makes it
+                               possible to pass special iterators through
+                               unchanged (see :func:`wrap_file` for more
+                               details.)
+    """
+
+    #: the charset of the response.
+    charset = 'utf-8'
+
+    #: the default status if none is provided.
+    default_status = 200
+
+    #: the default mimetype if none is provided.
+    default_mimetype = 'text/plain'
+
+    #: if set to `False` accessing properties on the response object will
+    #: not try to consume the response iterator and convert it into a list.
+    #:
+    #: .. versionadded:: 0.6.2
+    #:
+    #:    That attribute was previously called `implicit_seqence_conversion`.
+    #:    (Notice the typo).  If you did use this feature, you have to adapt
+    #:    your code to the name change.
+    implicit_sequence_conversion = True
+
+    def __init__(self, response=None, status=None, headers=None,
+                 mimetype=None, content_type=None, direct_passthrough=False):
+        if isinstance(headers, Headers):
+            self.headers = headers
+        elif not headers:
+            self.headers = Headers()
+        else:
+            self.headers = Headers(headers)
+
+        if content_type is None:
+            if mimetype is None and 'content-type' not in self.headers:
+                mimetype = self.default_mimetype
+            if mimetype is not None:
+                mimetype = get_content_type(mimetype, self.charset)
+            content_type = mimetype
+        if content_type is not None:
+            self.headers['Content-Type'] = content_type
+        if status is None:
+            status = self.default_status
+        if isinstance(status, (int, long)):
+            self.status_code = status
+        else:
+            self.status = status
+
+        self.direct_passthrough = direct_passthrough
+        self._on_close = []
+
+        # we set the response after the headers so that if a class changes
+        # the charset attribute, the data is set in the correct charset.
+        if response is None:
+            self.response = []
+        elif isinstance(response, basestring):
+            self.data = response
+        else:
+            self.response = response
+
+    def call_on_close(self, func):
+        """Adds a function to the internal list of functions that should
+        be called as part of closing down the response.
+
+        .. versionadded:: 0.6
+        """
+        self._on_close.append(func)
+
+    def __repr__(self):
+        if self.is_sequence:
+            body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
+        else:
+            body_info = self.is_streamed and 'streamed' or 'likely-streamed'
+        return '<%s %s [%s]>' % (
+            self.__class__.__name__,
+            body_info,
+            self.status
+        )
+
+    @classmethod
+    def force_type(cls, response, environ=None):
+        """Enforce that the WSGI response is a response object of the current
+        type.  Werkzeug will use the :class:`BaseResponse` internally in many
+        situations like the exceptions.  If you call :meth:`get_response` on an
+        exception you will get back a regular :class:`BaseResponse` object, even
+        if you are using a custom subclass.
+
+        This method can enforce a given response type, and it will also
+        convert arbitrary WSGI callables into response objects if an environ
+        is provided::
+
+            # convert a Werkzeug response object into an instance of the
+            # MyResponseClass subclass.
+            response = MyResponseClass.force_type(response)
+
+            # convert any WSGI application into a response object
+            response = MyResponseClass.force_type(response, environ)
+
+        This is especially useful if you want to post-process responses in
+        the main dispatcher and use functionality provided by your subclass.
+
+        Keep in mind that this will modify response objects in place if
+        possible!
+
+        :param response: a response object or wsgi application.
+        :param environ: a WSGI environment object.
+        :return: a response object.
+        """
+        if not isinstance(response, BaseResponse):
+            if environ is None:
+                raise TypeError('cannot convert WSGI application into '
+                                'response objects without an environ')
+            response = BaseResponse(*_run_wsgi_app(response, environ))
+        response.__class__ = cls
+        return response
+
+    @classmethod
+    def from_app(cls, app, environ, buffered=False):
+        """Create a new response object from an application output.  This
+        works best if you pass it an application that returns a generator all
+        the time.  Sometimes applications may use the `write()` callable
+        returned by the `start_response` function.  This tries to resolve such
+        edge cases automatically.  But if you don't get the expected output
+        you should set `buffered` to `True` which enforces buffering.
+
+        :param app: the WSGI application to execute.
+        :param environ: the WSGI environment to execute against.
+        :param buffered: set to `True` to enforce buffering.
+        :return: a response object.
+        """
+        return cls(*_run_wsgi_app(app, environ, buffered))
+
+    def _get_status_code(self):
+        try:
+            return int(self.status.split(None, 1)[0])
+        except ValueError:
+            return 0
+    def _set_status_code(self, code):
+        try:
+            self.status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
+        except KeyError:
+            self.status = '%d UNKNOWN' % code
+    status_code = property(_get_status_code, _set_status_code,
+                           'The HTTP Status code as number')
+    del _get_status_code, _set_status_code
+
+    def _get_data(self):
+        """The string representation of the request body.  Whenever you access
+        this property the request iterable is encoded and flattened.  This
+        can lead to unwanted behavior if you stream big data.
+
+        This behavior can be disabled by setting
+        :attr:`implicit_sequence_conversion` to `False`.
+        """
+        self._ensure_sequence()
+        return ''.join(self.iter_encoded())
+    def _set_data(self, value):
+        # if a unicode string is set, it's encoded directly.  this allows
+        # us to guess the content length automatically in `get_wsgi_headers`.
+        if isinstance(value, unicode):
+            value = value.encode(self.charset)
+        self.response = [value]
+    data = property(_get_data, _set_data, doc=_get_data.__doc__)
+    del _get_data, _set_data
+
+    def _ensure_sequence(self, mutable=False):
+        """This method can be called by methods that need a sequence.  If
+        `mutable` is true, it will also ensure that the response sequence
+        is a standard Python list.
+
+        .. versionadded:: 0.6
+        """
+        if self.is_sequence:
+            # if we need a mutable object, we ensure it's a list.
+            if mutable and not isinstance(self.response, list):
+                self.response = list(self.response)
+            return
+        if not self.implicit_sequence_conversion:
+            raise RuntimeError('The response object required the iterable '
+                               'to be a sequence, but the implicit '
+                               'conversion was disabled.  Call '
+                               'make_sequence() yourself.')
+        self.make_sequence()
+
+    def make_sequence(self):
+        """Converts the response iterator in a list.  By default this happens
+        automatically if required.  If `implicit_sequence_conversion` is
+        disabled, this method is not automatically called and some properties
+        might raise exceptions.  This also encodes all the items.
+
+        .. versionadded:: 0.6
+        """
+        if not self.is_sequence:
+            # if we consume an iterable we have to ensure that the close
+            # method of the iterable is called if available when we tear
+            # down the response
+            close = getattr(self.response, 'close', None)
+            self.response = list(self.iter_encoded())
+            if close is not None:
+                self.call_on_close(close)
+
+    def iter_encoded(self, charset=None):
+        """Iter the response encoded with the encoding of the response.
+        If the response object is invoked as WSGI application the return
+        value of this method is used as application iterator unless
+        :attr:`direct_passthrough` was activated.
+
+        .. versionchanged:: 0.6
+
+           The `charset` parameter was deprecated and became a no-op.
+        """
+        # XXX: deprecated
+        if __debug__ and charset is not None: # pragma: no cover
+            from warnings import warn
+            warn(DeprecationWarning('charset was deprecated and is ignored.'),
+                 stacklevel=2)
+        charset = self.charset
+        if __debug__:
+            _warn_if_string(self.response)
+        for item in self.response:
+            if isinstance(item, unicode):
+                yield item.encode(charset)
+            else:
+                yield str(item)
+
+    def set_cookie(self, key, value='', max_age=None, expires=None,
+                   path='/', domain=None, secure=None, httponly=False):
+        """Sets a cookie. The parameters are the same as in the cookie `Morsel`
+        object in the Python standard library but it accepts unicode data, too.
+
+        :param key: the key (name) of the cookie to be set.
+        :param value: the value of the cookie.
+        :param max_age: should be a number of seconds, or `None` (default) if
+                        the cookie should last only as long as the client's
+                        browser session.
+        :param expires: should be a `datetime` object or UNIX timestamp.
+        :param domain: if you want to set a cross-domain cookie.  For example,
+                       ``domain=".example.com"`` will set a cookie that is
+                       readable by the domain ``www.example.com``,
+                       ``foo.example.com`` etc.  Otherwise, a cookie will only
+                       be readable by the domain that set it.
+        :param path: limits the cookie to a given path, per default it will
+                     span the whole domain.
+        """
+        self.headers.add('Set-Cookie', dump_cookie(key, value, max_age,
+                         expires, path, domain, secure, httponly,
+                         self.charset))
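+
+    # A hedged usage sketch for set_cookie (illustrative names and values):
+    #
+    #     response = BaseResponse('ok')
+    #     response.set_cookie('session', u'abc123', max_age=3600,
+    #                         httponly=True)
+    #     response.headers['Set-Cookie']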
+
+    def delete_cookie(self, key, path='/', domain=None):
+        """Delete a cookie.  Fails silently if key doesn't exist.
+
+        :param key: the key (name) of the cookie to be deleted.
+        :param path: if the cookie that should be deleted was limited to a
+                     path, the path has to be defined here.
+        :param domain: if the cookie that should be deleted was limited to a
+                       domain, that domain has to be defined here.
+        """
+        self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
+
+    @property
+    def header_list(self): # pragma: no cover
+        # XXX: deprecated
+        if __debug__:
+            from warnings import warn
+            warn(DeprecationWarning('header_list is deprecated'),
+                 stacklevel=2)
+        return self.headers.to_list(self.charset)
+
+    @property
+    def is_streamed(self):
+        """If the response is streamed (the response is not an iterable with
+        a length information) this property is `True`.  In this case streamed
+        means that there is no information about the number of iterations.
+        This is usually `True` if a generator is passed to the response object.
+
+        This is useful for checking before applying some sort of post
+        filtering that should not take place for streamed responses.
+        """
+        try:
+            len(self.response)
+        except TypeError:
+            return True
+        return False
+
+    @property
+    def is_sequence(self):
+        """If the iterator is buffered, this property will be `True`.  A
+        response object will consider an iterator to be buffered if the
+        response attribute is a list or tuple.
+
+        .. versionadded:: 0.6
+        """
+        return isinstance(self.response, (tuple, list))
+
+    def close(self):
+        """Close the wrapped response if possible."""
+        if hasattr(self.response, 'close'):
+            self.response.close()
+        for func in self._on_close:
+            func()
+
+    def freeze(self):
+        """Call this method if you want to make your response object ready for
+        being pickled.  This buffers the generator if there is one.  It will
+        also set the `Content-Length` header to the length of the body.
+
+        .. versionchanged:: 0.6
+           The `Content-Length` header is now set.
+        """
+        # we explicitly convert the response into a list of the *encoded*
+        # items and set the content length from it, even if the implicit
+        # sequence conversion is disabled.
+        self.response = list(self.iter_encoded())
+        self.headers['Content-Length'] = str(sum(map(len, self.response)))
+
+    def fix_headers(self, environ):
+        # XXX: deprecated
+        if __debug__:
+            from warnings import warn
+            warn(DeprecationWarning('called into deprecated fix_headers baseclass '
+                                    'method.  Use get_wsgi_headers instead.'),
+                 stacklevel=2)
+        self.headers[:] = self.get_wsgi_headers(environ)
+
+    def get_wsgi_headers(self, environ):
+        """This is automatically called right before the response is started
+        and returns headers modified for the given environment.  It returns a
+        copy of the headers from the response with some modifications applied
+        if necessary.
+
+        For example the location header (if present) is joined with the root
+        URL of the environment.  Also the content length is automatically set
+        to zero here for certain status codes.
+
+        .. versionchanged:: 0.6
+           Previously that function was called `fix_headers` and modified
+           the response object in place.  Also since 0.6, IRIs in location
+           and content-location headers are handled properly.
+
+           Also starting with 0.6, Werkzeug will attempt to set the content
+           length if it is able to figure it out on its own.  This is the
+           case if all the strings in the response iterable are already
+           encoded and the iterable is buffered.
+
+        :param environ: the WSGI environment of the request.
+        :return: returns a new :class:`Headers` object.
+        """
+        headers = Headers(self.headers)
+
+        # make sure the location header is an absolute URL
+        location = headers.get('location')
+        if location is not None:
+            if isinstance(location, unicode):
+                location = iri_to_uri(location)
+            headers['Location'] = urlparse.urljoin(
+                get_current_url(environ, root_only=True),
+                location
+            )
+
+        # make sure the content location is a URL
+        content_location = headers.get('content-location')
+        if content_location is not None and \
+           isinstance(content_location, unicode):
+            headers['Content-Location'] = iri_to_uri(content_location)
+
+        if 100 <= self.status_code < 200 or self.status_code == 204:
+            headers['Content-Length'] = '0'
+        elif self.status_code == 304:
+            remove_entity_headers(headers)
+
+        # if we can determine the content length automatically, we
+        # should try to do that.  But only if this does not involve
+        # flattening the iterator or encoding of unicode strings in
+        # the response.
+        if self.is_sequence and 'content-length' not in self.headers:
+            try:
+                content_length = sum(len(str(x)) for x in self.response)
+            except UnicodeError:
+                # aha, something non-bytestringy in there, too bad, we
+                # can't safely figure out the length of the response.
+                pass
+            else:
+                headers['Content-Length'] = str(content_length)
+
+        return headers
+
+    def get_app_iter(self, environ):
+        """Returns the application iterator for the given environ.  Depending
+        on the request method and the current status code the return value
+        might be an empty iterable rather than the actual response body.
+
+        If the request method is `HEAD` or the status code is in a range
+        where the HTTP specification requires an empty response, an empty
+        iterable is returned.
+
+        .. versionadded:: 0.6
+
+        :param environ: the WSGI environment of the request.
+        :return: a response iterable.
+        """
+        if environ['REQUEST_METHOD'] == 'HEAD' or \
+           100 <= self.status_code < 200 or self.status_code in (204, 304):
+            return ()
+        if self.direct_passthrough:
+            if __debug__:
+                _warn_if_string(self.response)
+            return self.response
+        return ClosingIterator(self.iter_encoded(), self.close)
+
+    def get_wsgi_response(self, environ):
+        """Returns the final WSGI response as tuple.  The first item in
+        the tuple is the application iterator, the second the status and
+        the third the list of headers.  The response returned is created
+        specially for the given environment.  For example if the request
+        method in the WSGI environment is ``'HEAD'`` the response will
+        be empty and only the headers and status code will be present.
+
+        .. versionadded:: 0.6
+
+        :param environ: the WSGI environment of the request.
+        :return: an ``(app_iter, status, headers)`` tuple.
+        """
+        # XXX: code for backwards compatibility with custom fix_headers
+        # methods.
+        if self.fix_headers.func_code is not \
+           BaseResponse.fix_headers.func_code:
+            if __debug__:
+                from warnings import warn
+                warn(DeprecationWarning('fix_headers changed behavior in 0.6 '
+                                        'and is now called get_wsgi_headers. '
+                                        'See documentation for more details.'),
+                     stacklevel=2)
+            self.fix_headers(environ)
+            headers = self.headers
+        else:
+            headers = self.get_wsgi_headers(environ)
+        app_iter = self.get_app_iter(environ)
+        return app_iter, self.status, headers.to_list(self.charset)
+
+    def __call__(self, environ, start_response):
+        """Process this response as WSGI application.
+
+        :param environ: the WSGI environment.
+        :param start_response: the response callable provided by the WSGI
+                               server.
+        :return: an application iterator
+        """
+        app_iter, status, headers = self.get_wsgi_response(environ)
+        start_response(status, headers)
+        return app_iter
+
+
+class AcceptMixin(object):
+    """A mixin for classes with an :attr:`~BaseResponse.environ` attribute to
+    get all the HTTP accept headers as :class:`Accept` objects (or subclasses
+    thereof).
+    """
+
+    @cached_property
+    def accept_mimetypes(self):
+        """List of mimetypes this client supports as :class:`MIMEAccept`
+        object.
+        """
+        return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept)
+
+    @cached_property
+    def accept_charsets(self):
+        """List of charsets this client supports as :class:`CharsetAccept`
+        object.
+        """
+        return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
+                                   CharsetAccept)
+
+    @cached_property
+    def accept_encodings(self):
+        """List of encodings this client accepts.  Encodings in a HTTP term
+        are compression encodings such as gzip.  For charsets have a look at
+        :attr:`accept_charset`.
+        """
+        return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))
+
+    @cached_property
+    def accept_languages(self):
+        """List of languages this client accepts as :class:`LanguageAccept`
+        object.
+
+        .. versionchanged:: 0.5
+           In previous versions this was a regular :class:`Accept` object.
+        """
+        return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
+                                   LanguageAccept)
+
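+
+# A small sketch of the accept header parsing.  It assumes the Request class
+# assembled at the end of this module takes the WSGI environ as its only
+# required constructor argument and that the parsed Accept objects behave
+# like lists of ``(value, quality)`` pairs.
+def _example_accept_headers():
+    environ = {
+        'REQUEST_METHOD': 'GET',
+        'HTTP_ACCEPT': 'text/html,application/xml;q=0.9,*/*;q=0.8',
+        'HTTP_ACCEPT_LANGUAGE': 'de-AT,de;q=0.8,en;q=0.5',
+    }
+    request = Request(environ)
+    # each entry is a (value, quality) pair
+    mimetypes = list(request.accept_mimetypes)
+    languages = list(request.accept_languages)
+    return mimetypes, languages
+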
+
+class ETagRequestMixin(object):
+    """Add entity tag and cache descriptors to a request object or object with
+    a WSGI environment available as :attr:`~BaseRequest.environ`.  This not
+    only provides access to etags but also to the cache control header.
+    """
+
+    @cached_property
+    def cache_control(self):
+        """A :class:`RequestCacheControl` object for the incoming cache control
+        headers.
+        """
+        cache_control = self.environ.get('HTTP_CACHE_CONTROL')
+        return parse_cache_control_header(cache_control, None,
+                                          RequestCacheControl)
+
+    @cached_property
+    def if_match(self):
+        """An object containing all the etags in the `If-Match` header."""
+        return parse_etags(self.environ.get('HTTP_IF_MATCH'))
+
+    @cached_property
+    def if_none_match(self):
+        """An object containing all the etags in the `If-None-Match` header."""
+        return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH'))
+
+    @cached_property
+    def if_modified_since(self):
+        """The parsed `If-Modified-Since` header as datetime object."""
+        return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))
+
+    @cached_property
+    def if_unmodified_since(self):
+        """The parsed `If-Unmodified-Since` header as datetime object."""
+        return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))
+
+
+class UserAgentMixin(object):
+    """Adds a `user_agent` attribute to the request object which contains the
+    parsed user agent of the browser that triggered the request as `UserAgent`
+    object.
+    """
+
+    @cached_property
+    def user_agent(self):
+        """The current user agent."""
+        from werkzeug.useragents import UserAgent
+        return UserAgent(self.environ)
+
+
+class AuthorizationMixin(object):
+    """Adds an :attr:`authorization` property that represents the parsed value
+    of the `Authorization` header as :class:`Authorization` object.
+    """
+
+    @cached_property
+    def authorization(self):
+        """The `Authorization` object in parsed form."""
+        header = self.environ.get('HTTP_AUTHORIZATION')
+        return parse_authorization_header(header)
+
+
+class ETagResponseMixin(object):
+    """Adds extra functionality to a response object for etag and cache
+    handling.  This mixin requires an object with at least a `headers`
+    object that implements a dict-like interface similar to :class:`Headers`.
+
+    If you want the :meth:`freeze` method to automatically add an etag, you
+    have to mix this class in before the response base class.  The default
+    response class does not do that.
+    """
+
+    @property
+    def cache_control(self):
+        """The Cache-Control general-header field is used to specify
+        directives that MUST be obeyed by all caching mechanisms along the
+        request/response chain.
+        """
+        def on_update(cache_control):
+            if not cache_control and 'cache-control' in self.headers:
+                del self.headers['cache-control']
+            elif cache_control:
+                self.headers['Cache-Control'] = cache_control.to_header()
+        return parse_cache_control_header(self.headers.get('cache-control'),
+                                          on_update,
+                                          ResponseCacheControl)
+
+    def make_conditional(self, request_or_environ):
+        """Make the response conditional to the request.  This method works
+        best if an etag was defined for the response already.  The `add_etag`
+        method can be used to do that.  If called without etag just the date
+        header is set.
+
+        This does nothing if the request method in the request or environ is
+        anything but GET or HEAD.
+
+        It does not remove the body of the response because that's something
+        the :meth:`__call__` function does for us automatically.
+
+        Returns self so that you can do ``return resp.make_conditional(req)``
+        but modifies the object in-place.
+
+        :param request_or_environ: a request object or WSGI environment to be
+                                   used to make the response conditional
+                                   against.
+        """
+        environ = _get_environ(request_or_environ)
+        if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
+            self.headers['Date'] = http_date()
+            if 'content-length' in self.headers:
+                self.headers['Content-Length'] = len(self.data)
+            if not is_resource_modified(environ, self.headers.get('etag'), None,
+                                        self.headers.get('last-modified')):
+                self.status_code = 304
+        return self
+
+    def add_etag(self, overwrite=False, weak=False):
+        """Add an etag for the current response if there is none yet."""
+        if overwrite or 'etag' not in self.headers:
+            self.set_etag(generate_etag(self.data), weak)
+
+    def set_etag(self, etag, weak=False):
+        """Set the etag, and override the old one if there was one."""
+        self.headers['ETag'] = quote_etag(etag, weak)
+
+    def get_etag(self):
+        """Return a tuple in the form ``(etag, is_weak)``.  If there is no
+        ETag the return value is ``(None, None)``.
+        """
+        return unquote_etag(self.headers.get('ETag'))
+
+    def freeze(self, no_etag=False):
+        """Call this method if you want to make your response object ready for
+        pickling.  This buffers the generator if there is one.  This also
+        sets the etag unless `no_etag` is set to `True`.
+        """
+        if not no_etag:
+            self.add_etag()
+        super(ETagResponseMixin, self).freeze()
+
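+
+# A rough sketch of a conditional response built on this mixin, assuming the
+# full Response class assembled at the end of this module and a hand-built
+# environ dict.  The etag value itself is whatever ``generate_etag`` computes
+# from the body.
+def _example_conditional_response():
+    resp = Response('a body that rarely changes')
+    resp.add_etag()                      # derives the ETag header from the body
+
+    # a revalidation request that sends the etag back via If-None-Match
+    environ = {
+        'REQUEST_METHOD': 'GET',
+        'HTTP_IF_NONE_MATCH': resp.headers['ETag'],
+    }
+    resp.make_conditional(environ)
+    # if the etag matched, status_code is now 304 and __call__ will send an
+    # empty body for this response
+    return resp.status_code
+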
+
+class ResponseStream(object):
+    """A file descriptor like object used by the :class:`ResponseStreamMixin` to
+    represent the body of the stream.  It directly pushes into the response
+    iterable of the response object.
+    """
+
+    mode = 'wb+'
+
+    def __init__(self, response):
+        self.response = response
+        self.closed = False
+
+    def write(self, value):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        self.response._ensure_sequence(mutable=True)
+        self.response.response.append(value)
+
+    def writelines(self, seq):
+        for item in seq:
+            self.write(item)
+
+    def close(self):
+        self.closed = True
+
+    def flush(self):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+
+    def isatty(self):
+        if self.closed:
+            raise ValueError('I/O operation on closed file')
+        return False
+
+    @property
+    def encoding(self):
+        return self.response.charset
+
+
+class ResponseStreamMixin(object):
+    """Mixin for :class:`BaseRequest` subclasses.  Classes that inherit from
+    this mixin will automatically get a :attr:`stream` property that provides
+    a write-only interface to the response iterable.
+    """
+
+    @cached_property
+    def stream(self):
+        """The response iterable as write-only stream."""
+        return ResponseStream(self)
+
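+
+# A short sketch of the write-only stream, assuming the full Response class
+# assembled at the end of this module; everything written ends up in the
+# response iterable.
+def _example_response_stream():
+    resp = Response()
+    resp.stream.write('Hello ')
+    resp.stream.writelines(['World', '!'])
+    return resp.response                 # the list of written chunks
+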
+
+class CommonRequestDescriptorsMixin(object):
+    """A mixin for :class:`BaseRequest` subclasses.  Request objects that
+    mix this class in will automatically get descriptors for a couple of
+    HTTP headers with automatic type conversion.
+
+    .. versionadded:: 0.5
+    """
+
+    content_type = environ_property('CONTENT_TYPE', doc='''
+         The Content-Type entity-header field indicates the media type of
+         the entity-body sent to the recipient or, in the case of the HEAD
+         method, the media type that would have been sent had the request
+         been a GET.''')
+    content_length = environ_property('CONTENT_LENGTH', None, int, str, doc='''
+         The Content-Length entity-header field indicates the size of the
+         entity-body in bytes or, in the case of the HEAD method, the size of
+         the entity-body that would have been sent had the request been a
+         GET.''')
+    referrer = environ_property('HTTP_REFERER', doc='''
+        The Referer[sic] request-header field allows the client to specify,
+        for the server's benefit, the address (URI) of the resource from which
+        the Request-URI was obtained (the "referrer", although the header
+        field is misspelled).''')
+    date = environ_property('HTTP_DATE', None, parse_date, doc='''
+        The Date general-header field represents the date and time at which
+        the message was originated, having the same semantics as orig-date
+        in RFC 822.''')
+    max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
+         The Max-Forwards request-header field provides a mechanism with the
+         TRACE and OPTIONS methods to limit the number of proxies or gateways
+         that can forward the request to the next inbound server.''')
+
+    def _parse_content_type(self):
+        if not hasattr(self, '_parsed_content_type'):
+            self._parsed_content_type = \
+                parse_options_header(self.environ.get('CONTENT_TYPE', ''))
+
+    @property
+    def mimetype(self):
+        """Like :attr:`content_type` but without parameters (eg, without
+        charset, type etc.).  For example if the content
+        type is ``text/html; charset=utf-8`` the mimetype would be
+        ``'text/html'``.
+        """
+        self._parse_content_type()
+        return self._parsed_content_type[0]
+
+    @property
+    def mimetype_params(self):
+        """The mimetype parameters as dict.  For example if the content
+        type is ``text/html; charset=utf-8`` the params would be
+        ``{'charset': 'utf-8'}``.
+        """
+        self._parse_content_type()
+        return self._parsed_content_type[1]
+
+    @cached_property
+    def pragma(self):
+        """The Pragma general-header field is used to include
+        implementation-specific directives that might apply to any recipient
+        along the request/response chain.  All pragma directives specify
+        optional behavior from the viewpoint of the protocol; however, some
+        systems MAY require that behavior be consistent with the directives.
+        """
+        return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
+
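+
+# A quick sketch of the request header descriptors, assuming the Request
+# class assembled at the end of this module and a hand-built environ dict.
+def _example_request_descriptors():
+    environ = {
+        'REQUEST_METHOD': 'POST',
+        'CONTENT_TYPE': 'text/html; charset=utf-8',
+        'CONTENT_LENGTH': '1280',
+        'HTTP_REFERER': 'http://example.com/other-page',
+    }
+    request = Request(environ)
+    return (request.content_length,      # 1280 (converted to an int)
+            request.mimetype,            # 'text/html'
+            request.mimetype_params,     # {'charset': 'utf-8'}
+            request.referrer)            # 'http://example.com/other-page'
+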
+
+class CommonResponseDescriptorsMixin(object):
+    """A mixin for :class:`BaseResponse` subclasses.  Response objects that
+    mix this class in will automatically get descriptors for a couple of
+    HTTP headers with automatic type conversion.
+    """
+
+    def _get_mimetype(self):
+        ct = self.headers.get('content-type')
+        if ct:
+            return ct.split(';')[0].strip()
+
+    def _set_mimetype(self, value):
+        self.headers['Content-Type'] = get_content_type(value, self.charset)
+
+    def _get_mimetype_params(self):
+        def on_update(d):
+            self.headers['Content-Type'] = \
+                dump_options_header(self.mimetype, d)
+        d = parse_options_header(self.headers.get('content-type', ''))[1]
+        return CallbackDict(d, on_update)
+
+    mimetype = property(_get_mimetype, _set_mimetype, doc='''
+        The mimetype (content type without charset etc.)''')
+    mimetype_params = property(_get_mimetype_params, doc='''
+        The mimetype parameters as dict.  For example if the content
+        type is ``text/html; charset=utf-8`` the params would be
+        ``{'charset': 'utf-8'}``.
+
+        .. versionadded:: 0.5
+        ''')
+    location = header_property('Location', doc='''
+        The Location response-header field is used to redirect the recipient
+        to a location other than the Request-URI for completion of the request
+        or identification of a new resource.''')
+    age = header_property('Age', None, parse_date, http_date, doc='''
+        The Age response-header field conveys the sender's estimate of the
+        amount of time since the response (or its revalidation) was
+        generated at the origin server.
+
+        Age values are non-negative decimal integers, representing time in
+        seconds.''')
+    content_type = header_property('Content-Type', doc='''
+        The Content-Type entity-header field indicates the media type of the
+        entity-body sent to the recipient or, in the case of the HEAD method,
+        the media type that would have been sent had the request been a GET.
+    ''')
+    content_length = header_property('Content-Length', None, int, str, doc='''
+        The Content-Length entity-header field indicates the size of the
+        entity-body, in decimal number of OCTETs, sent to the recipient or,
+        in the case of the HEAD method, the size of the entity-body that would
+        have been sent had the request been a GET.''')
+    content_location = header_property('Content-Location', doc='''
+        The Content-Location entity-header field MAY be used to supply the
+        resource location for the entity enclosed in the message when that
+        entity is accessible from a location separate from the requested
+        resource's URI.''')
+    content_encoding = header_property('Content-Encoding', doc='''
+        The Content-Encoding entity-header field is used as a modifier to the
+        media-type.  When present, its value indicates what additional content
+        codings have been applied to the entity-body, and thus what decoding
+        mechanisms must be applied in order to obtain the media-type
+        referenced by the Content-Type header field.''')
+    content_md5 = header_property('Content-MD5', doc='''
+         The Content-MD5 entity-header field, as defined in RFC 1864, is an
+         MD5 digest of the entity-body for the purpose of providing an
+         end-to-end message integrity check (MIC) of the entity-body.  (Note:
+         a MIC is good for detecting accidental modification of the
+         entity-body in transit, but is not proof against malicious attacks.)
+        ''')
+    date = header_property('Date', None, parse_date, http_date, doc='''
+        The Date general-header field represents the date and time at which
+        the message was originated, having the same semantics as orig-date
+        in RFC 822.''')
+    expires = header_property('Expires', None, parse_date, http_date, doc='''
+        The Expires entity-header field gives the date/time after which the
+        response is considered stale. A stale cache entry may not normally be
+        returned by a cache.''')
+    last_modified = header_property('Last-Modified', None, parse_date,
+                                    http_date, doc='''
+        The Last-Modified entity-header field indicates the date and time at
+        which the origin server believes the variant was last modified.''')
+
+    def _get_retry_after(self):
+        value = self.headers.get('retry-after')
+        if value is None:
+            return
+        elif value.isdigit():
+            return datetime.utcnow() + timedelta(seconds=int(value))
+        return parse_date(value)
+    def _set_retry_after(self, value):
+        if value is None:
+            if 'retry-after' in self.headers:
+                del self.headers['retry-after']
+            return
+        elif isinstance(value, datetime):
+            value = http_date(value)
+        else:
+            value = str(value)
+        self.headers['Retry-After'] = value
+
+    retry_after = property(_get_retry_after, _set_retry_after, doc='''
+        The Retry-After response-header field can be used with a 503 (Service
+        Unavailable) response to indicate how long the service is expected
+        to be unavailable to the requesting client.
+
+        Time in seconds until expiration or date.''')
+
+    def _set_property(name, doc=None):
+        def fget(self):
+            def on_update(header_set):
+                if not header_set and name in self.headers:
+                    del self.headers[name]
+                elif header_set:
+                    self.headers[name] = header_set.to_header()
+            return parse_set_header(self.headers.get(name), on_update)
+        return property(fget, doc=doc)
+
+    vary = _set_property('Vary', doc='''
+         The Vary field value indicates the set of request-header fields that
+         fully determines, while the response is fresh, whether a cache is
+         permitted to use the response to reply to a subsequent request
+         without revalidation.''')
+    content_language = _set_property('Content-Language', doc='''
+         The Content-Language entity-header field describes the natural
+         language(s) of the intended audience for the enclosed entity.  Note
+         that this might not be equivalent to all the languages used within
+         the entity-body.''')
+    allow = _set_property('Allow', doc='''
+        The Allow entity-header field lists the set of methods supported
+        by the resource identified by the Request-URI. The purpose of this
+        field is strictly to inform the recipient of valid methods
+        associated with the resource. An Allow header field MUST be
+        present in a 405 (Method Not Allowed) response.''')
+
+    del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
+        _set_retry_after
+
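+
+# A sketch of the response-side descriptors, assuming the full Response class
+# assembled at the end of this module and that the parsed `Vary` header set
+# supports an ``add`` method that writes the header back.
+def _example_response_descriptors():
+    resp = Response('{}')
+    resp.mimetype = 'application/json'   # rewrites the Content-Type header
+    resp.content_length = 2              # stored as the string '2'
+    resp.retry_after = 120               # plain numbers are stored as strings
+    resp.vary.add('Accept-Encoding')     # updates the Vary header in place
+    return resp.headers
+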
+
+class WWWAuthenticateMixin(object):
+    """Adds a :attr:`www_authenticate` property to a response object."""
+
+    @property
+    def www_authenticate(self):
+        """The `WWW-Authenticate` header in a parsed form."""
+        def on_update(www_auth):
+            if not www_auth and 'www-authenticate' in self.headers:
+                del self.headers['www-authenticate']
+            elif www_auth:
+                self.headers['WWW-Authenticate'] = www_auth.to_header()
+        header = self.headers.get('www-authenticate')
+        return parse_www_authenticate_header(header, on_update)
+
+
+class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
+              UserAgentMixin, AuthorizationMixin,
+              CommonRequestDescriptorsMixin):
+    """Full featured request object implementing the following mixins:
+
+    - :class:`AcceptMixin` for accept header parsing
+    - :class:`ETagRequestMixin` for etag and cache control handling
+    - :class:`UserAgentMixin` for user agent introspection
+    - :class:`AuthorizationMixin` for http auth handling
+    - :class:`CommonRequestDescriptorsMixin` for common headers
+    """
+
+
+class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
+               CommonResponseDescriptorsMixin,
+               WWWAuthenticateMixin):
+    """Full featured response object implementing the following mixins:
+
+    - :class:`ETagResponseMixin` for etag and cache control handling
+    - :class:`ResponseStreamMixin` to add support for the `stream` property
+    - :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
+    - :class:`WWWAuthenticateMixin` for HTTP authentication support
+    """
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bundled/werkzeug/werkzeug/wsgi.py	Fri Jun 11 20:14:01 2010 -0400
@@ -0,0 +1,763 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.wsgi
+    ~~~~~~~~~~~~~
+
+    This module implements WSGI related helpers.
+
+    :copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import os
+import urllib
+import urlparse
+import posixpath
+import mimetypes
+from zlib import adler32
+from time import time, mktime
+from datetime import datetime
+
+from werkzeug._internal import _patch_wrapper
+from werkzeug.utils import http_date
+from werkzeug.http import is_resource_modified
+
+
+def responder(f):
+    """Marks a function as responder.  Decorate a function with it and it
+    will automatically call the return value as WSGI application.
+
+    Example::
+
+        @responder
+        def application(environ, start_response):
+            return Response('Hello World!')
+    """
+    return _patch_wrapper(f, lambda *a: f(*a)(*a[-2:]))
+
+
+def get_current_url(environ, root_only=False, strip_querystring=False,
+                    host_only=False):
+    """A handy helper function that recreates the full URL for the current
+    request or parts of it.  Here an example:
+
+    >>> from werkzeug import create_environ
+    >>> env = create_environ("/?param=foo", "http://localhost/script")
+    >>> get_current_url(env)
+    'http://localhost/script/?param=foo'
+    >>> get_current_url(env, root_only=True)
+    'http://localhost/script/'
+    >>> get_current_url(env, host_only=True)
+    'http://localhost/'
+    >>> get_current_url(env, strip_querystring=True)
+    'http://localhost/script/'
+
+    :param environ: the WSGI environment to get the current URL from.
+    :param root_only: set `True` if you only want the root URL.
+    :param strip_querystring: set to `True` if you don't want the querystring.
+    :param host_only: set to `True` if the host URL should be returned.
+    """
+    tmp = [environ['wsgi.url_scheme'], '://', get_host(environ)]
+    cat = tmp.append
+    if host_only:
+        return ''.join(tmp) + '/'
+    cat(urllib.quote(environ.get('SCRIPT_NAME', '').rstrip('/')))
+    if root_only:
+        cat('/')
+    else:
+        cat(urllib.quote('/' + environ.get('PATH_INFO', '').lstrip('/')))
+        if not strip_querystring:
+            qs = environ.get('QUERY_STRING')
+            if qs:
+                cat('?' + qs)
+    return ''.join(tmp)
+
+
+def get_host(environ):
+    """Return the real host for the given WSGI environment.  This takes care
+    of the `X-Forwarded-Host` header.
+
+    :param environ: the WSGI environment to get the host of.
+    """
+    if 'HTTP_X_FORWARDED_HOST' in environ:
+        return environ['HTTP_X_FORWARDED_HOST']
+    elif 'HTTP_HOST' in environ:
+        return environ['HTTP_HOST']
+    result = environ['SERVER_NAME']
+    if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
+       in (('https', '443'), ('http', '80')):
+        result += ':' + environ['SERVER_PORT']
+    return result
+
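+
+# A tiny sketch of the resolution order: X-Forwarded-Host wins over Host,
+# which wins over SERVER_NAME/SERVER_PORT (the port is omitted for the
+# default ports 80 and 443).
+def _example_get_host():
+    base = {'wsgi.url_scheme': 'http',
+            'SERVER_NAME': 'localhost', 'SERVER_PORT': '8080'}
+    hosts = [get_host(dict(base))]                               # 'localhost:8080'
+    hosts.append(get_host(dict(base, HTTP_HOST='example.com')))  # 'example.com'
+    hosts.append(get_host(dict(base,
+                               HTTP_X_FORWARDED_HOST='www.example.com')))
+    return hosts
+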
+
+def pop_path_info(environ):
+    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
+    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
+
+    If there are empty segments (``'/foo//bar'``) these are ignored but
+    properly pushed to the `SCRIPT_NAME`:
+
+    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
+    >>> pop_path_info(env)
+    'a'
+    >>> env['SCRIPT_NAME']
+    '/foo/a'
+    >>> pop_path_info(env)
+    'b'
+    >>> env['SCRIPT_NAME']
+    '/foo/a/b'
+
+    .. versionadded:: 0.5
+
+    :param environ: the WSGI environment that is modified.
+    """
+    path = environ.get('PATH_INFO')
+    if not path:
+        return None
+
+    script_name = environ.get('SCRIPT_NAME', '')
+
+    # shift multiple leading slashes over
+    old_path = path
+    path = path.lstrip('/')
+    if path != old_path:
+        script_name += '/' * (len(old_path) - len(path))
+
+    if '/' not in path:
+        environ['PATH_INFO'] = ''
+        environ['SCRIPT_NAME'] = script_name + path
+        return path
+
+    segment, path = path.split('/', 1)
+    environ['PATH_INFO'] = '/' + path
+    environ['SCRIPT_NAME'] = script_name + segment
+    return segment
+
+
+def peek_path_info(environ):
+    """Returns the next segment on the `PATH_INFO` or `None` if there
+    is none.  Works like :func:`pop_path_info` without modifying the
+    environment:
+
+    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
+    >>> peek_path_info(env)
+    'a'
+    >>> peek_path_info(env)
+    'a'
+
+    .. versionadded:: 0.5
+
+    :param environ: the WSGI environment that is checked.
+    """
+    segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
+    if segments:
+        return segments[0]
+
+
+def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
+                      errors='ignore', collapse_http_schemes=True):
+    """Extracts the path info from the given URL (or WSGI environment) and
+    path.  The path info returned is a unicode string, not a bytestring
+    suitable for a WSGI environment.  The URLs might also be IRIs.
+
+    If the path info could not be determined, `None` is returned.
+
+    Some examples:
+
+    >>> extract_path_info('http://example.com/app', '/app/hello')
+    u'/hello'
+    >>> extract_path_info('http://example.com/app',
+    ...                   'https://example.com/app/hello')
+    u'/hello'
+    >>> extract_path_info('http://example.com/app',
+    ...                   'https://example.com/app/hello',
+    ...                   collapse_http_schemes=False) is None
+    True
+
+    Instead of providing a base URL you can also pass a WSGI environment.
+
+    .. versionadded:: 0.6
+
+    :param environ_or_baseurl: a WSGI environment dict, a base URL or
+                               base IRI.  This is the root of the
+                               application.
+    :param path_or_url: an absolute path from the server root, a
+                        relative path (in which case it's the path info)
+                        or a full URL.  Also accepts IRIs and unicode
+                        parameters.
+    :param charset: the charset for byte data in URLs
+    :param errors: the error handling on decode
+    :param collapse_http_schemes: if set to `False` the algorithm does
+                                  not assume that http and https on the
+                                  same server point to the same
+                                  resource.
+    """
+    from werkzeug.urls import uri_to_iri, url_fix
+
+    def _as_iri(obj):
+        if not isinstance(obj, unicode):
+            return uri_to_iri(obj, charset, errors)
+        return obj
+
+    def _normalize_netloc(scheme, netloc):
+        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
+        if len(parts) == 2:
+            netloc, port = parts
+            if (scheme == u'http' and port == u'80') or \
+               (scheme == u'https' and port == u'443'):
+                port = None
+        else:
+            netloc = parts[0]
+            port = None
+        if port is not None:
+            netloc += u':' + port
+        return netloc
+
+    # make sure whatever we are working on is an IRI and parse it
+    path = _as_iri(path_or_url)
+    if isinstance(environ_or_baseurl, dict):
+        environ_or_baseurl = get_current_url(environ_or_baseurl,
+                                             root_only=True)
+    base_iri = _as_iri(environ_or_baseurl)
+    base_scheme, base_netloc, base_path, = \
+        urlparse.urlsplit(base_iri)[:3]
+    cur_scheme, cur_netloc, cur_path, = \
+        urlparse.urlsplit(urlparse.urljoin(base_iri, path))[:3]
+
+    # normalize the network location
+    base_netloc = _normalize_netloc(base_scheme, base_netloc)
+    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
+
+    # is that IRI even on a known HTTP scheme?
+    if collapse_http_schemes:
+        for scheme in base_scheme, cur_scheme:
+            if scheme not in (u'http', u'https'):
+                return None
+    else:
+        if not (base_scheme in (u'http', u'https') and \
+                base_scheme == cur_scheme):
+            return None
+
+    # are the netlocs compatible?
+    if base_netloc != cur_netloc:
+        return None
+
+    # are we below the application path?
+    base_path = base_path.rstrip(u'/')
+    if not cur_path.startswith(base_path):
+        return None
+
+    return u'/' + cur_path[len(base_path):].lstrip(u'/')
+
+
+class SharedDataMiddleware(object):
+    """A WSGI middleware that provides static content for development
+    environments or simple server setups. Usage is quite simple::
+
+        import os
+        from werkzeug import SharedDataMiddleware
+
+        app = SharedDataMiddleware(app, {
+            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
+        })
+
+    The contents of the folder ``./shared`` will now be available on
+    ``http://example.com/shared/``.  This is pretty useful during development
+    because a standalone media server is not required.  One can also mount
+    files on the root folder and still continue to use the application because
+    the shared data middleware forwards all unhandled requests to the
+    application, even if the requests are below one of the shared folders.
+
+    If `pkg_resources` is available you can also tell the middleware to serve
+    files from package data::
+
+        app = SharedDataMiddleware(app, {
+            '/shared': ('myapplication', 'shared_files')
+        })
+
+    This will then serve the ``shared_files`` folder in the `myapplication`
+    Python package.
+
+    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
+    rules for files that are not accessible from the web.  If `cache` is set to
+    `False` no caching headers are sent.
+
+    Currently the middleware does not support non-ASCII filenames.  If the
+    encoding on the file system happens to be the encoding of the URI it may
+    work but this could also be by accident.  We strongly suggest using
+    ASCII-only file names for static files.
+
+    The middleware will guess the mimetype using the Python `mimetypes`
+    module.  If it's unable to figure out the mimetype it will fall back
+    to `fallback_mimetype`.
+
+    .. versionchanged:: 0.5
+       The cache timeout is configurable now.
+
+    .. versionadded:: 0.6
+       The `fallback_mimetype` parameter was added.
+
+    :param app: the application to wrap.  If you don't want to wrap an
+                application you can pass it :exc:`NotFound`.
+    :param exports: a dict of exported files and folders.
+    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
+    :param fallback_mimetype: the fallback mimetype for unknown files.
+    :param cache: enable or disable caching headers.
+    :param cache_timeout: the cache timeout in seconds for the headers.
+    """
+
+    def __init__(self, app, exports, disallow=None, cache=True,
+                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
+        self.app = app
+        self.exports = {}
+        self.cache = cache
+        self.cache_timeout = cache_timeout
+        for key, value in exports.iteritems():
+            if isinstance(value, tuple):
+                loader = self.get_package_loader(*value)
+            elif isinstance(value, basestring):
+                if os.path.isfile(value):
+                    loader = self.get_file_loader(value)
+                else:
+                    loader = self.get_directory_loader(value)
+            else:
+                raise TypeError('unknown def %r' % value)
+            self.exports[key] = loader
+        if disallow is not None:
+            from fnmatch import fnmatch
+            self.is_allowed = lambda x: not fnmatch(x, disallow)
+        self.fallback_mimetype = fallback_mimetype
+
+    def is_allowed(self, filename):
+        """Subclasses can override this method to disallow the access to
+        certain files.  However by providing `disallow` in the constructor
+        this method is overwritten.
+        """
+        return True
+
+    def _opener(self, filename):
+        return lambda: (
+            open(filename, 'rb'),
+            datetime.utcfromtimestamp(os.path.getmtime(filename)),
+            int(os.path.getsize(filename))
+        )
+
+    def get_file_loader(self, filename):
+        return lambda x: (os.path.basename(filename), self._opener(filename))
+
+    def get_package_loader(self, package, package_path):
+        from pkg_resources import DefaultProvider, ResourceManager, \
+             get_provider
+        loadtime = datetime.utcnow()
+        provider = get_provider(package)
+        manager = ResourceManager()
+        filesystem_bound = isinstance(provider, DefaultProvider)
+        def loader(path):
+            if path is None:
+                return None, None
+            path = posixpath.join(package_path, path)
+            if not provider.has_resource(path):
+                return None, None
+            basename = posixpath.basename(path)
+            if filesystem_bound:
+                return basename, self._opener(
+                    provider.get_resource_filename(manager, path))
+            return basename, lambda: (
+                provider.get_resource_stream(manager, path),
+                loadtime,
+                0
+            )
+        return loader
+
+    def get_directory_loader(self, directory):
+        def loader(path):
+            if path is not None:
+                path = os.path.join(directory, path)
+            else:
+                path = directory
+            if os.path.isfile(path):
+                return os.path.basename(path), self._opener(path)
+            return None, None
+        return loader
+
+    def generate_etag(self, mtime, file_size, real_filename):
+        return 'wzsdm-%d-%s-%s' % (
+            mktime(mtime.timetuple()),
+            file_size,
+            adler32(real_filename) & 0xffffffff
+        )
+
+    def __call__(self, environ, start_response):
+        # sanitize the path for non unix systems
+        cleaned_path = environ.get('PATH_INFO', '').strip('/')
+        for sep in os.sep, os.altsep:
+            if sep and sep != '/':
+                cleaned_path = cleaned_path.replace(sep, '/')
+        path = '/'.join([''] + [x for x in cleaned_path.split('/')
+                                if x and x != '..'])
+        file_loader = None
+        for search_path, loader in self.exports.iteritems():
+            if search_path == path:
+                real_filename, file_loader = loader(None)
+                if file_loader is not None:
+                    break
+            if not search_path.endswith('/'):
+                search_path += '/'
+            if path.startswith(search_path):
+                real_filename, file_loader = loader(path[len(search_path):])
+                if file_loader is not None:
+                    break
+        if file_loader is None or not self.is_allowed(real_filename):
+            return self.app(environ, start_response)
+
+        guessed_type = mimetypes.guess_type(real_filename)
+        mime_type = guessed_type[0] or self.fallback_mimetype
+        f, mtime, file_size = file_loader()
+
+        headers = [('Date', http_date())]
+        if self.cache:
+            timeout = self.cache_timeout
+            etag = self.generate_etag(mtime, file_size, real_filename)
+            headers += [
+                ('Etag', '"%s"' % etag),
+                ('Cache-Control', 'max-age=%d, public' % timeout)
+            ]
+            if not is_resource_modified(environ, etag, last_modified=mtime):
+                f.close()
+                start_response('304 Not Modified', headers)
+                return []
+            headers.append(('Expires', http_date(time() + timeout)))
+        else:
+            headers.append(('Cache-Control', 'public'))
+
+        headers.extend((
+            ('Content-Type', mime_type),
+            ('Content-Length', str(file_size)),
+            ('Last-Modified', http_date(mtime))
+        ))
+        start_response('200 OK', headers)
+        return wrap_file(environ, f)
+
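+
+# A sketch of serving only static files without wrapping a real application,
+# assuming ``NotFound`` from werkzeug.exceptions (HTTP exceptions are WSGI
+# applications themselves) and a hypothetical ./static directory next to the
+# calling module.
+def _example_static_only_app():
+    from werkzeug.exceptions import NotFound
+    static_dir = os.path.join(os.path.dirname(__file__), 'static')
+    return SharedDataMiddleware(NotFound(), {
+        '/': static_dir,
+    })
+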
+
+class DispatcherMiddleware(object):
+    """Allows one to mount middlewares or applications in a WSGI application.
+    This is useful if you want to combine multiple WSGI applications::
+
+        app = DispatcherMiddleware(app, {
+            '/app2':        app2,
+            '/app3':        app3
+        })
+    """
+
+    def __init__(self, app, mounts=None):
+        self.app = app
+        self.mounts = mounts or {}
+
+    def __call__(self, environ, start_response):
+        script = environ.get('PATH_INFO', '')
+        path_info = ''
+        while '/' in script:
+            if script in self.mounts:
+                app = self.mounts[script]
+                break
+            items = script.split('/')
+            script = '/'.join(items[:-1])
+            path_info = '/%s%s' % (items[-1], path_info)
+        else:
+            app = self.mounts.get(script, self.app)
+        original_script_name = environ.get('SCRIPT_NAME', '')
+        environ['SCRIPT_NAME'] = original_script_name + script
+        environ['PATH_INFO'] = path_info
+        return app(environ, start_response)
+
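+
+# A sketch of how the dispatcher rewrites SCRIPT_NAME and PATH_INFO before
+# handing the request over; the mounted apps here are dummy WSGI callables
+# that only expose the rewritten environ.
+def _example_dispatcher():
+    def make_app(name):
+        def app(environ, start_response):
+            start_response('200 OK', [('Content-Type', 'text/plain')])
+            return ['%s %s|%s' % (name, environ['SCRIPT_NAME'],
+                                  environ['PATH_INFO'])]
+        return app
+
+    app = DispatcherMiddleware(make_app('main'), {'/api': make_app('api')})
+    environ = {'PATH_INFO': '/api/users/42', 'SCRIPT_NAME': ''}
+    # for '/api/users/42' the mounted 'api' app sees SCRIPT_NAME='/api' and
+    # PATH_INFO='/users/42'
+    return app(environ, lambda status, headers: None)
+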
+
+class ClosingIterator(object):
+    """The WSGI specification requires that all middlewares and gateways
+    respect the `close` callback of an iterator.  Because it is useful to add
+    another close action to a returned iterator and writing a custom iterator
+    class for that is a boring task, this class can be used instead::
+
+        return ClosingIterator(app(environ, start_response), [cleanup_session,
+                                                              cleanup_locals])
+
+    If there is just one close function it can be passed instead of the list.
+
+    A closing iterator is not needed if the application uses response objects
+    and finishes the processing when the response is started::
+
+        try:
+            return response(environ, start_response)
+        finally:
+            cleanup_session()
+            cleanup_locals()
+    """
+
+    def __init__(self, iterable, callbacks=None):
+        iterator = iter(iterable)
+        self._next = iterator.next
+        if callbacks is None:
+            callbacks = []
+        elif callable(callbacks):
+            callbacks = [callbacks]
+        else:
+            callbacks = list(callbacks)
+        iterable_close = getattr(iterator, 'close', None)
+        if iterable_close:
+            callbacks.insert(0, iterable_close)
+        self._callbacks = callbacks
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        return self._next()
+
+    def close(self):
+        for callback in self._callbacks:
+            callback()
+
+
+def wrap_file(environ, file, buffer_size=8192):
+    """Wraps a file.  This uses the WSGI server's file wrapper if available
+    or otherwise the generic :class:`FileWrapper`.
+
+    .. versionadded:: 0.5
+
+    If the file wrapper from the WSGI server is used it's important to not
+    iterate over it from inside the application but to pass it through
+    unchanged.  If you want to pass out a file wrapper inside a response
+    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
+
+    More information about file wrappers is available in :pep:`333`.
+
+    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
+    :param buffer_size: number of bytes for one iteration.
+    """
+    return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
+
+
+class FileWrapper(object):
+    """This class can be used to convert a :class:`file`-like object into
+    an iterable.  It yields `buffer_size` blocks until the file is fully
+    read.
+
+    You should not use this class directly but rather use the
+    :func:`wrap_file` function that uses the WSGI server's file wrapper
+    support if it's available.
+
+    .. versionadded:: 0.5
+
+    If you're using this object together with a :class:`BaseResponse` you have
+    to use the `direct_passthrough` mode.
+
+    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
+    :param buffer_size: number of bytes for one iteration.
+    """
+
+    def __init__(self, file, buffer_size=8192):
+        self.file = file
+        self.buffer_size = buffer_size
+
+    def close(self):
+        if hasattr(self.file, 'close'):
+            self.file.close()
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        data = self.file.read(self.buffer_size)
+        if data:
+            return data
+        raise StopIteration()
+
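+
+# A sketch of sending a file through a response object, assuming the Response
+# class from werkzeug.wrappers and a hypothetical file path.  The wrapped
+# file must be passed through unchanged, hence direct_passthrough.
+def _example_send_file(environ, start_response):
+    from werkzeug.wrappers import Response
+    f = open('/path/to/report.pdf', 'rb')          # hypothetical file
+    resp = Response(wrap_file(environ, f))
+    resp.direct_passthrough = True
+    resp.content_type = 'application/pdf'
+    return resp(environ, start_response)
+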
+
+def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
+    """Safely iterates line-based over an input stream.  If the input stream
+    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
+
+    This uses the stream's :meth:`~file.read` method internally as opposed
+    to the :meth:`~file.readline` method that is unsafe and can only be used
+    in violation of the WSGI specification.  The same problem applies to the
+    `__iter__` function of the input stream which calls :meth:`~file.readline`
+    without arguments.
+
+    If you need line-by-line processing it's strongly recommended to iterate
+    over the input stream using this helper function.
+
+    :param stream: the stream to iterate over.
+    :param limit: the limit in bytes for the stream (usually the
+                  content length).  Not necessary if the `stream`
+                  is a :class:`LimitedStream`.
+    :param buffer_size: The optional buffer size.
+    """
+    if not isinstance(stream, LimitedStream):
+        if limit is None:
+            raise TypeError('stream not limited and no limit provided.')
+        stream = LimitedStream(stream, limit)
+    _read = stream.read
+    buffer = []
+    while 1:
+        if len(buffer) > 1:
+            yield buffer.pop()
+            continue
+
+        # we reverse the chunks because popping from the last
+        # position of the list is O(1) and the number of chunks
+        # read will be quite large for binary files.
+        chunks = _read(buffer_size).splitlines(True)
+        chunks.reverse()
+
+        first_chunk = buffer and buffer[0] or ''
+        if chunks:
+            first_chunk += chunks.pop()
+        if not first_chunk:
+            return
+
+        buffer = chunks
+        yield first_chunk
+
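+
+# A sketch of line-based iteration over a body stream, with a StringIO
+# standing in for `wsgi.input`; the limit is mandatory because the stream is
+# not a LimitedStream.
+def _example_line_iter():
+    from StringIO import StringIO
+    data = 'first line\nsecond line\nno newline at the end'
+    lines = list(make_line_iter(StringIO(data), limit=len(data)))
+    # -> ['first line\n', 'second line\n', 'no newline at the end']
+    return lines
+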
+
+class LimitedStream(object):
+    """Wraps a stream so that it doesn't read more than n bytes.  If the
+    stream is exhausted and the caller tries to get more bytes from it
+    :meth:`on_exhausted` is called, which by default returns an empty
+    string.  The return value of that function is forwarded
+    to the reader function.  So if it returns an empty string
+    :meth:`read` will return an empty string as well.
+
+    The limit however must never be higher than what the stream can
+    output.  Otherwise :meth:`readlines` will try to read past the
+    limit.
+
+    The `silent` parameter has no effect if :meth:`on_exhausted` is
+    overridden by a subclass.
+
+    .. versionchanged:: 0.6
+       Non-silent usage was deprecated because it causes confusion.
+       If you want that, override :meth:`on_exhausted` and raise a
+       :exc:`~exceptions.BadRequest` yourself.
+
+    .. admonition:: Note on WSGI compliance
+
+       Calls to :meth:`readline` and :meth:`readlines` are not
+       WSGI compliant because they pass a size argument to the
+       readline methods.  Unfortunately the WSGI PEP is not safely
+       implementable without a size argument to :meth:`readline`
+       because there is no EOF marker in the stream.  As a result
+       of that the use of :meth:`readline` is discouraged.
+
+       For the same reason iterating over the :class:`LimitedStream`
+       is not portable.  It internally calls :meth:`readline`.
+
+       We strongly suggest using :meth:`read` only or using the
+       :func:`make_line_iter` which safely iterates line-based
+       over a WSGI input stream.
+
+    :param stream: the stream to wrap.
+    :param limit: the limit for the stream, must not be longer than
+                  what the stream can provide if the stream does not
+                  end with `EOF` (like `wsgi.input`)
+    :param silent: If set to `True` the stream will allow reading
+                   past the limit and will return an empty string.
+    """
+
+    def __init__(self, stream, limit, silent=True):
+        self._read = stream.read
+        self._readline = stream.readline
+        self._pos = 0
+        self.limit = limit
+        self.silent = silent
+        if not silent:
+            from warnings import warn
+            warn(DeprecationWarning('non-silent usage of the '
+            'LimitedStream is deprecated.  If you want to '
+            'continue to use the stream in non-silent usage '
+            'override on_exhausted.'), stacklevel=2)
+
+    def __iter__(self):
+        return self
+
+    @property
+    def is_exhausted(self):
+        """If the stream is exhausted this attribute is `True`."""
+        return self._pos >= self.limit
+
+    def on_exhausted(self):
+        """This is called when the stream tries to read past the limit.
+        The return value of this function is returned from the reading
+        function.
+
+        By default this returns an empty string if the stream is silent,
+        otherwise it raises a :exc:`~werkzeug.exceptions.BadRequest`.
+        """
+        if self.silent:
+            return ''
+        from werkzeug.exceptions import BadRequest
+        raise BadRequest('input stream exhausted')
+
+    def exhaust(self, chunk_size=1024 * 16):
+        """Exhaust the stream.  This consumes all the data left until the
+        limit is reached.
+
+        :param chunk_size: the size of a chunk.  The stream is read in
+                           chunks of this size until it is exhausted and
+                           the results are thrown away.
+        """
+        to_read = self.limit - self._pos
+        chunk = chunk_size
+        while to_read > 0:
+            chunk = min(to_read, chunk)
+            self.read(chunk)
+            to_read -= chunk
+
+    def read(self, size=None):
+        """Read `size` bytes or if size is not provided everything is read.
+
+        :param size: the number of bytes read.
+        """
+        if self._pos >= self.limit:
+            return self.on_exhausted()
+        if size is None:
+            size = self.limit
+        read = self._read(min(self.limit - self._pos, size))
+        self._pos += len(read)
+        return read
+
+    def readline(self, size=None):
+        """Reads one line from the stream."""
+        if self._pos >= self.limit:
+            return self.on_exhausted()
+        if size is None:
+            size = self.limit - self._pos
+        else:
+            size = min(size, self.limit - self._pos)
+        line = self._readline(size)
+        self._pos += len(line)
+        return line
+
+    def readlines(self, size=None):
+        """Reads a file into a list of strings.  It calls :meth:`readline`
+        until the file is read to the end.  It does support the optional
+        `size` argument if the underlying stream supports it for
+        `readline`.
+        """
+        last_pos = self._pos
+        result = []
+        if size is not None:
+            end = min(self.limit, last_pos + size)
+        else:
+            end = self.limit
+        while 1:
+            if size is not None:
+                size -= last_pos - self._pos
+            if self._pos >= end:
+                break
+            result.append(self.readline(size))
+            if size is not None:
+                last_pos = self._pos
+        return result
+
+    def next(self):
+        line = self.readline()
+        if line is None:
+            raise StopIteration()
+        return line
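+
+
+# A sketch of reading a request body through a LimitedStream, again with a
+# StringIO standing in for `wsgi.input`; reading past the limit silently
+# returns an empty string.
+def _example_limited_stream():
+    from StringIO import StringIO
+    raw = StringIO('x' * 100 + 'trailing data the application must not see')
+    stream = LimitedStream(raw, limit=100)
+    first = stream.read(60)              # 60 bytes
+    rest = stream.read()                 # the remaining 40 bytes
+    past_end = stream.read()             # '' -- the stream is exhausted
+    return first, rest, past_end, stream.is_exhausted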