+  <http://github.com/mitsuhiko/flask-sqlalchemy/zipball/master#egg=Flask-SQLAlchemy-dev>`_
+
+
+
diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/RECORD b/app/lib/Flask_SQLAlchemy-2.2.dist-info/RECORD
new file mode 100644
index 0000000..761bba7
--- /dev/null
+++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/RECORD
@@ -0,0 +1,11 @@
+Flask_SQLAlchemy-2.2.dist-info/DESCRIPTION.rst,sha256=Mp4bpckSjf082xflOARFwzWLTnUszq7JxcY0dR9vD2w,273
+Flask_SQLAlchemy-2.2.dist-info/LICENSE.txt,sha256=2smrI3hNiP6c5TcX0fa6fqODgsdJVLC166X0kVxei9A,1457
+Flask_SQLAlchemy-2.2.dist-info/METADATA,sha256=hIgP0kudClmQDnErllZMdlp_WjajcBmlILbSKzVMovI,1348
+Flask_SQLAlchemy-2.2.dist-info/RECORD,,
+Flask_SQLAlchemy-2.2.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113
+Flask_SQLAlchemy-2.2.dist-info/metadata.json,sha256=mU6wJVocytpfiF75Utnlrh36Z2ZzFeF9h7oTVtiTy50,1219
+Flask_SQLAlchemy-2.2.dist-info/top_level.txt,sha256=w2K4fNNoTh4HItoFfz2FRQShSeLcvHYrzU_sZov21QU,17
+flask_sqlalchemy/__init__.py,sha256=U0HkvloC9KPZpgT8zzLQEyRpLAhVkG5AUt_HzrzTVeY,36641
+flask_sqlalchemy/_compat.py,sha256=wVdOzefs6BI6Fi5mu21YctjQjrkIDdkPfgsWkye1v-4,623
+flask_sqlalchemy/__pycache__/_compat.cpython-34.pyc,,
+flask_sqlalchemy/__pycache__/__init__.cpython-34.pyc,,
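
Each RECORD line above follows the wheel installation-database convention: a relative path, a ``sha256=`` digest encoded as unpadded urlsafe base64, and a size in bytes (rows such as RECORD itself and the ``__pycache__`` files legitimately carry no hash). A minimal verification sketch, assuming it is run from ``app/lib/`` so the recorded paths resolve::

    import base64
    import csv
    import hashlib

    def verify_record(record_path):
        # RECORD rows are CSV triples: path, "sha256=<digest>", size-in-bytes
        with open(record_path, newline="") as f:
            for path, digest, size in csv.reader(f):
                if not digest:          # RECORD / *.pyc rows have no hash
                    continue
                algo, _, expected = digest.partition("=")
                with open(path, "rb") as g:
                    data = g.read()
                actual = base64.urlsafe_b64encode(
                    hashlib.new(algo, data).digest()).rstrip(b"=").decode("ascii")
                ok = actual == expected and len(data) == int(size)
                print("OK " if ok else "BAD", path)

    verify_record("Flask_SQLAlchemy-2.2.dist-info/RECORD")
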
diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/WHEEL b/app/lib/Flask_SQLAlchemy-2.2.dist-info/WHEEL
new file mode 100644
index 0000000..7bf9daa
--- /dev/null
+++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0.a0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/metadata.json b/app/lib/Flask_SQLAlchemy-2.2.dist-info/metadata.json
new file mode 100644
index 0000000..49f19aa
--- /dev/null
+++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"], "extensions": {"python.details": {"contacts": [{"email": "phil@quae.co.uk", "name": "Phil Howell", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "http://github.com/mitsuhiko/flask-sqlalchemy"}}}, "extras": [], "generator": "bdist_wheel (0.30.0.a0)", "license": "BSD", "metadata_version": "2.0", "name": "Flask-SQLAlchemy", "platform": "any", "run_requires": [{"requires": ["Flask (>=0.10)", "SQLAlchemy (>=0.8.0)"]}], "summary": "Adds SQLAlchemy support to your Flask application", "version": "2.2"}
\ No newline at end of file
diff --git a/app/lib/Flask_SQLAlchemy-2.2.dist-info/top_level.txt b/app/lib/Flask_SQLAlchemy-2.2.dist-info/top_level.txt
new file mode 100644
index 0000000..8a5538e
--- /dev/null
+++ b/app/lib/Flask_SQLAlchemy-2.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+flask_sqlalchemy
diff --git a/app/lib/Jinja2-2.9.6.dist-info/DESCRIPTION.rst b/app/lib/Jinja2-2.9.6.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..4421f04
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/DESCRIPTION.rst
@@ -0,0 +1,36 @@
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+`Django`_ inspired non-XML syntax but supports inline expressions and
+an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here is a small example of a Jinja template::
+
+ {% extends 'base.html' %}
+ {% block title %}Memberlist{% endblock %}
+ {% block content %}
+   <ul>
+   {% for user in users %}
+     <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+   {% endfor %}
+   </ul>
+ {% endblock %}
+
+Philosophy
+----------
+
+Application logic is for the controller, but don't make life too hard
+for the template designer by giving them too little functionality.
+
+For more information, visit the new `Jinja2 webpage`_ and `documentation`_.
+
+.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: http://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+.. _documentation: http://jinja.pocoo.org/2/documentation/
+
+
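
The nutshell above shows template inheritance; here is a minimal rendering sketch against jinja2 2.9, where the ``base.html`` source and the ``users`` data are illustrative assumptions rather than anything in this diff::

    from jinja2 import Environment, DictLoader

    # A stand-in base template; the real base.html is not part of this diff.
    templates = {
        "base.html": "<title>{% block title %}{% endblock %}</title>\n"
                     "{% block content %}{% endblock %}",
        "memberlist.html": (
            "{% extends 'base.html' %}\n"
            "{% block title %}Memberlist{% endblock %}\n"
            "{% block content %}\n"
            "  <ul>\n"
            "  {% for user in users %}\n"
            "    <li><a href=\"{{ user.url }}\">{{ user.username }}</a></li>\n"
            "  {% endfor %}\n"
            "  </ul>\n"
            "{% endblock %}"
        ),
    }

    env = Environment(loader=DictLoader(templates))
    # Attribute access on dicts falls back to item lookup in Jinja2.
    print(env.get_template("memberlist.html").render(
        users=[{"url": "/u/1", "username": "phil"}]))
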
diff --git a/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt b/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt
new file mode 100644
index 0000000..10145a2
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/LICENSE.txt
@@ -0,0 +1,31 @@
+Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/app/lib/Jinja2-2.9.6.dist-info/METADATA b/app/lib/Jinja2-2.9.6.dist-info/METADATA
new file mode 100644
index 0000000..ea69de1
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/METADATA
@@ -0,0 +1,65 @@
+Metadata-Version: 2.0
+Name: Jinja2
+Version: 2.9.6
+Summary: A small but fast and easy to use stand-alone template engine written in pure python.
+Home-page: http://jinja.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Dist: MarkupSafe (>=0.23)
+Provides-Extra: i18n
+Requires-Dist: Babel (>=0.8); extra == 'i18n'
+
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+`Django`_ inspired non-XML syntax but supports inline expressions and
+an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here is a small example of a Jinja template::
+
+ {% extends 'base.html' %}
+ {% block title %}Memberlist{% endblock %}
+ {% block content %}
+   <ul>
+   {% for user in users %}
+     <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+   {% endfor %}
+   </ul>
+ {% endblock %}
+
+Philosophy
+----------
+
+Application logic is for the controller, but don't make life too hard
+for the template designer by giving them too little functionality.
+
+For more information, visit the new `Jinja2 webpage`_ and `documentation`_.
+
+.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: http://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+.. _documentation: http://jinja.pocoo.org/2/documentation/
+
+
diff --git a/app/lib/Jinja2-2.9.6.dist-info/RECORD b/app/lib/Jinja2-2.9.6.dist-info/RECORD
new file mode 100644
index 0000000..4c81f8d
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/RECORD
@@ -0,0 +1,58 @@
+jinja2/__init__.py,sha256=Cx_UnJO4i_GqvKQsOu__mvGE_eMJSsBqITa26irtg5A,2565
+jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596
+jinja2/_stringdefs.py,sha256=PYtqTmmWIhjXlFBoH-eE6fJkQvlu7nxUyQ2YlFB97VA,589381
+jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
+jinja2/asyncsupport.py,sha256=ZJO1Fdd9R93sDLrk6TZNuMQGgtuDmpTlENNRkLwZF7c,7765
+jinja2/bccache.py,sha256=0xoVw0R9nj3vtzPl9g-zB5BKTLFJ7FFMq2ABbn1IkCI,12793
+jinja2/compiler.py,sha256=lE5owyPwT1cGGZxWyzQtZLW7Uj1g3Vw1oVtBU8Uc_uM,62929
+jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
+jinja2/debug.py,sha256=UqEbKb4zofBABwvyA77Kr0-5IAQawKqC9t8ZeTIzpGU,12038
+jinja2/defaults.py,sha256=GvVEQqIRvRMCbQF2NZSr0mlEN8lxvGixU5wIIAeRe1A,1323
+jinja2/environment.py,sha256=z91L_efdYs-KNs6DBxQWDyYncOwOqn_0J4M5CfFj0Q8,50848
+jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
+jinja2/ext.py,sha256=9xq8fd_QPBIe4Z7hE1XawB7f1EDHrVZjpb2JiRTiG94,23867
+jinja2/filters.py,sha256=1OYGhyN84yVmFUIOwJNRV_StqTCfPhnRLfJTmWbEe_8,33424
+jinja2/idtracking.py,sha256=HHcCOMsQhCrrjwYAmikKqq_XetXLovCjXAThh9WbRAc,8760
+jinja2/lexer.py,sha256=W4A830e-fj12zRT6rL7H91F4D6xwED5LjR8iMxjWuVQ,28238
+jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
+jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
+jinja2/nodes.py,sha256=4_Ucxbkohtj4BAlpV0w_MpVmIxJNaVXDTBb4EHBA2JI,29392
+jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
+jinja2/parser.py,sha256=3tc82qO1Ovs9och_PjirbAmnWNT77n4wWjIQ8pEVKvU,35465
+jinja2/runtime.py,sha256=axkTQXg2-oc_Cm35NEMDDas3Jbq3ATxNrDOEa5v3wIw,26835
+jinja2/sandbox.py,sha256=Jx4MTxly8KvdkSWyui_kHY1_ZZ0RAQL4ojAy1KDRyK0,16707
+jinja2/tests.py,sha256=iFuUTbUYv7TFffq2aTswCRdIhQ6wyrby1YevChVPqkE,4428
+jinja2/utils.py,sha256=BIFqeXXsCUSjWx6MEwYhY6V4tXzVNs9WRXfB60MA9HY,19941
+jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
+Jinja2-2.9.6.dist-info/DESCRIPTION.rst,sha256=CXIS1UnPSk5_lZBS6Lb8ko-3lqGfjsiUwNBLXCTj2lc,975
+Jinja2-2.9.6.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72
+Jinja2-2.9.6.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554
+Jinja2-2.9.6.dist-info/METADATA,sha256=53LSXlqC86JTyLSPsDyAOmyV4pXIzzmmZoUXz7ogytA,2172
+Jinja2-2.9.6.dist-info/metadata.json,sha256=vzvX25T4hwMOe1EIOBo9rpfiZerOB_KVLcplGG_qYtE,1394
+Jinja2-2.9.6.dist-info/RECORD,,
+Jinja2-2.9.6.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+Jinja2-2.9.6.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
+jinja2/__pycache__/exceptions.cpython-34.pyc,,
+jinja2/__pycache__/filters.cpython-34.pyc,,
+jinja2/__pycache__/runtime.cpython-34.pyc,,
+jinja2/__pycache__/meta.cpython-34.pyc,,
+jinja2/__pycache__/loaders.cpython-34.pyc,,
+jinja2/__pycache__/_compat.cpython-34.pyc,,
+jinja2/__pycache__/ext.cpython-34.pyc,,
+jinja2/__pycache__/__init__.cpython-34.pyc,,
+jinja2/__pycache__/nodes.cpython-34.pyc,,
+jinja2/__pycache__/environment.cpython-34.pyc,,
+jinja2/__pycache__/parser.cpython-34.pyc,,
+jinja2/__pycache__/defaults.cpython-34.pyc,,
+jinja2/__pycache__/visitor.cpython-34.pyc,,
+jinja2/__pycache__/utils.cpython-34.pyc,,
+jinja2/__pycache__/idtracking.cpython-34.pyc,,
+jinja2/__pycache__/sandbox.cpython-34.pyc,,
+jinja2/__pycache__/debug.cpython-34.pyc,,
+jinja2/__pycache__/_stringdefs.cpython-34.pyc,,
+jinja2/__pycache__/tests.cpython-34.pyc,,
+jinja2/__pycache__/bccache.cpython-34.pyc,,
+jinja2/__pycache__/compiler.cpython-34.pyc,,
+jinja2/__pycache__/optimizer.cpython-34.pyc,,
+jinja2/__pycache__/lexer.cpython-34.pyc,,
+jinja2/__pycache__/constants.cpython-34.pyc,,
diff --git a/app/lib/Jinja2-2.9.6.dist-info/WHEEL b/app/lib/Jinja2-2.9.6.dist-info/WHEEL
new file mode 100644
index 0000000..9dff69d
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/app/lib/Jinja2-2.9.6.dist-info/entry_points.txt b/app/lib/Jinja2-2.9.6.dist-info/entry_points.txt
new file mode 100644
index 0000000..32e6b75
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+
+ [babel.extractors]
+ jinja2 = jinja2.ext:babel_extract[i18n]
+
\ No newline at end of file
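
The entry point above registers Jinja2's Babel message extractor under the ``babel.extractors`` group, gated on the ``i18n`` extra. A minimal lookup sketch using ``pkg_resources`` (the loader contemporary with this py2/py3 wheel layout), assuming ``app/lib`` is on ``sys.path``::

    import pkg_resources

    # Iterate everything registered under the babel.extractors group.
    for ep in pkg_resources.iter_entry_points("babel.extractors"):
        print(ep.name, "->", ep.module_name)   # e.g. jinja2 -> jinja2.ext
        extract = ep.resolve()                 # imports jinja2.ext:babel_extract
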
diff --git a/app/lib/Jinja2-2.9.6.dist-info/metadata.json b/app/lib/Jinja2-2.9.6.dist-info/metadata.json
new file mode 100644
index 0000000..9bbf942
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/metadata.json
@@ -0,0 +1 @@
+{"license": "BSD", "name": "Jinja2", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "run_requires": [{"requires": ["Babel (>=0.8)"], "extra": "i18n"}, {"requires": ["MarkupSafe (>=0.23)"]}], "version": "2.9.6", "extensions": {"python.details": {"project_urls": {"Home": "http://jinja.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extras": ["i18n"]}
\ No newline at end of file
diff --git a/app/lib/Jinja2-2.9.6.dist-info/top_level.txt b/app/lib/Jinja2-2.9.6.dist-info/top_level.txt
new file mode 100644
index 0000000..7f7afbf
--- /dev/null
+++ b/app/lib/Jinja2-2.9.6.dist-info/top_level.txt
@@ -0,0 +1 @@
+jinja2
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/PKG-INFO b/app/lib/MarkupSafe-1.0-py3.4.egg-info/PKG-INFO
new file mode 100644
index 0000000..6f2568f
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/PKG-INFO
@@ -0,0 +1,133 @@
+Metadata-Version: 1.1
+Name: MarkupSafe
+Version: 1.0
+Summary: Implements an XML/HTML/XHTML Markup safe string for Python
+Home-page: http://github.com/pallets/markupsafe
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Description: MarkupSafe
+ ==========
+
+ Implements a unicode subclass that supports HTML strings:
+
+ .. code-block:: python
+
+ >>> from markupsafe import Markup, escape
+ >>> escape("<script>alert(document.cookie);</script>")
+ Markup(u'<script>alert(document.cookie);</script>')
+ >>> tmpl = Markup("<em>%s</em>")
+ >>> tmpl % "Peter > Lustig"
+ Markup(u'<em>Peter &gt; Lustig</em>')
+
+ If you want to make an object unicode that is not yet unicode
+ but don't want to lose the taint information, you can use the
+ ``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
+ is a different name for the same function).
+
+ .. code-block:: python
+
+ >>> from markupsafe import soft_unicode
+ >>> soft_unicode(42)
+ u'42'
+ >>> soft_unicode(Markup('foo'))
+ Markup(u'foo')
+
+ HTML Representations
+ --------------------
+
+ Objects can customize their HTML markup equivalent by overriding
+ the ``__html__`` function:
+
+ .. code-block:: python
+
+ >>> class Foo(object):
+ ... def __html__(self):
+ ... return '<a href="#">Nice</a>'
+ ...
+ >>> escape(Foo())
+ Markup(u'<a href="#">Nice</a>')
+ >>> Markup(Foo())
+ Markup(u'<a href="#">Nice</a>')
+
+ Silent Escapes
+ --------------
+
+ Since MarkupSafe 0.10 there is now also a separate escape function
+ called ``escape_silent`` that returns an empty string for ``None`` for
+ consistency with other systems that return empty strings for ``None``
+ when escaping (for instance Pylons' webhelpers).
+
+ If you also want to use this for the escape method of the Markup
+ object, you can create your own subclass that does that:
+
+ .. code-block:: python
+
+ from markupsafe import Markup, escape_silent as escape
+
+ class SilentMarkup(Markup):
+ __slots__ = ()
+
+ @classmethod
+ def escape(cls, s):
+ return cls(escape(s))
+
+ New-Style String Formatting
+ ---------------------------
+
+ Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
+ 3.x are now fully supported. Previously the escape behavior of those
+ functions was spotty at best. The new implementation operates under the
+ following algorithm:
+
+ 1. if an object has an ``__html_format__`` method it is called as a
+ replacement for ``__format__`` with the format specifier. It has to
+ return either a string or a markup object.
+ 2. if an object has an ``__html__`` method it is called.
+ 3. otherwise the default format system of Python kicks in and the result
+ is HTML escaped.
+
+ Here is how you can implement your own formatting:
+
+ .. code-block:: python
+
+ class User(object):
+
+ def __init__(self, id, username):
+ self.id = id
+ self.username = username
+
+ def __html_format__(self, format_spec):
+ if format_spec == 'link':
+ return Markup('<a href="/user/{0}">{1}</a>').format(
+ self.id,
+ self.__html__(),
+ )
+ elif format_spec:
+ raise ValueError('Invalid format spec')
+ return self.__html__()
+
+ def __html__(self):
+ return Markup('<em>{0}</em>').format(self.username)
+
+ And to format that user:
+
+ .. code-block:: python
+
+ >>> user = User(1, 'foo')
+ >>> Markup('<p>User: {0:link}').format(user)
+ Markup(u'<p>User: <a href="/user/1"><em>foo</em></a>')
+
+ MarkupSafe supports Python 2.6, 2.7 and Python 3.3 and higher.
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
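
The three-step ``format()`` lookup described in the PKG-INFO above (``__html_format__``, then ``__html__``, then default formatting plus escaping) can be exercised end to end. A minimal sketch that re-types the ``User`` class from the description::

    from markupsafe import Markup, escape

    class User(object):
        def __init__(self, id, username):
            self.id = id
            self.username = username

        def __html_format__(self, format_spec):
            # Rule 1: called instead of __format__ when formatting with a spec.
            if format_spec == 'link':
                return Markup('<a href="/user/{0}">{1}</a>').format(
                    self.id, self.__html__())
            elif format_spec:
                raise ValueError('Invalid format spec')
            return self.__html__()

        def __html__(self):
            # Rule 2: used when no __html_format__ applies.
            return Markup('<em>{0}</em>').format(self.username)

    user = User(1, 'foo')
    print(Markup('<p>User: {0:link}').format(user))  # __html_format__ path
    print(Markup('<p>User: {0}').format(user))       # __html__ path
    print(Markup('<p>{0}').format('<script>'))       # rule 3: default escaping
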
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/SOURCES.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/SOURCES.txt
new file mode 100644
index 0000000..210b339
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/SOURCES.txt
@@ -0,0 +1,18 @@
+AUTHORS
+CHANGES
+LICENSE
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+tests.py
+MarkupSafe.egg-info/PKG-INFO
+MarkupSafe.egg-info/SOURCES.txt
+MarkupSafe.egg-info/dependency_links.txt
+MarkupSafe.egg-info/not-zip-safe
+MarkupSafe.egg-info/top_level.txt
+markupsafe/__init__.py
+markupsafe/_compat.py
+markupsafe/_constants.py
+markupsafe/_native.py
+markupsafe/_speedups.c
\ No newline at end of file
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/dependency_links.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/installed-files.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/installed-files.txt
new file mode 100644
index 0000000..f430f15
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/installed-files.txt
@@ -0,0 +1,16 @@
+../markupsafe/__init__.py
+../markupsafe/_compat.py
+../markupsafe/_constants.py
+../markupsafe/_native.py
+../markupsafe/_speedups.c
+../markupsafe/__pycache__/__init__.cpython-34.pyc
+../markupsafe/__pycache__/_compat.cpython-34.pyc
+../markupsafe/__pycache__/_constants.cpython-34.pyc
+../markupsafe/__pycache__/_native.cpython-34.pyc
+../markupsafe/_speedups.so
+./
+dependency_links.txt
+not-zip-safe
+PKG-INFO
+SOURCES.txt
+top_level.txt
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/not-zip-safe b/app/lib/MarkupSafe-1.0-py3.4.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/app/lib/MarkupSafe-1.0-py3.4.egg-info/top_level.txt b/app/lib/MarkupSafe-1.0-py3.4.egg-info/top_level.txt
new file mode 100644
index 0000000..75bf729
--- /dev/null
+++ b/app/lib/MarkupSafe-1.0-py3.4.egg-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/PKG-INFO b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/PKG-INFO
new file mode 100644
index 0000000..5475a48
--- /dev/null
+++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/PKG-INFO
@@ -0,0 +1,155 @@
+Metadata-Version: 1.1
+Name: SQLAlchemy
+Version: 1.1.9
+Summary: Database Abstraction Library
+Home-page: http://www.sqlalchemy.org
+Author: Mike Bayer
+Author-email: mike_mp@zzzcomputing.com
+License: MIT License
+Description: SQLAlchemy
+ ==========
+
+ The Python SQL Toolkit and Object Relational Mapper
+
+ Introduction
+ -------------
+
+ SQLAlchemy is the Python SQL toolkit and Object Relational Mapper
+ that gives application developers the full power and
+ flexibility of SQL. SQLAlchemy provides a full suite
+ of well known enterprise-level persistence patterns,
+ designed for efficient and high-performing database
+ access, adapted into a simple and Pythonic domain
+ language.
+
+ Major SQLAlchemy features include:
+
+ * An industrial strength ORM, built
+ from the core on the identity map, unit of work,
+ and data mapper patterns. These patterns
+ allow transparent persistence of objects
+ using a declarative configuration system.
+ Domain models
+ can be constructed and manipulated naturally,
+ and changes are synchronized with the
+ current transaction automatically.
+ * A relationally-oriented query system, exposing
+ the full range of SQL's capabilities
+ explicitly, including joins, subqueries,
+ correlation, and most everything else,
+ in terms of the object model.
+ Writing queries with the ORM uses the same
+ techniques of relational composition you use
+ when writing SQL. While you can drop into
+ literal SQL at any time, it's virtually never
+ needed.
+ * A comprehensive and flexible system
+ of eager loading for related collections and objects.
+ Collections are cached within a session,
+ and can be loaded on individual access, all
+ at once using joins, or by query per collection
+ across the full result set.
+ * A Core SQL construction system and DBAPI
+ interaction layer. The SQLAlchemy Core is
+ separate from the ORM and is a full database
+ abstraction layer in its own right, and includes
+ an extensible Python-based SQL expression
+ language, schema metadata, connection pooling,
+ type coercion, and custom types.
+ * All primary and foreign key constraints are
+ assumed to be composite and natural. Surrogate
+ integer primary keys are of course still the
+ norm, but SQLAlchemy never assumes or hardcodes
+ to this model.
+ * Database introspection and generation. Database
+ schemas can be "reflected" in one step into
+ Python structures representing database metadata;
+ those same structures can then generate
+ CREATE statements right back out - all within
+ the Core, independent of the ORM.
+
+ SQLAlchemy's philosophy:
+
+ * SQL databases behave less and less like object
+ collections the more size and performance start to
+ matter; object collections behave less and less like
+ tables and rows the more abstraction starts to matter.
+ SQLAlchemy aims to accommodate both of these
+ principles.
+ * An ORM doesn't need to hide the "R". A relational
+ database provides rich, set-based functionality
+ that should be fully exposed. SQLAlchemy's
+ ORM provides an open-ended set of patterns
+ that allow a developer to construct a custom
+ mediation layer between a domain model and
+ a relational schema, turning the so-called
+ "object relational impedance" issue into
+ a distant memory.
+ * The developer, in all cases, makes all decisions
+ regarding the design, structure, and naming conventions
+ of both the object model as well as the relational
+ schema. SQLAlchemy only provides the means
+ to automate the execution of these decisions.
+ * With SQLAlchemy, there's no such thing as
+ "the ORM generated a bad query" - you
+ retain full control over the structure of
+ queries, including how joins are organized,
+ how subqueries and correlation are used, what
+ columns are requested. Everything SQLAlchemy
+ does is ultimately the result of a developer-
+ initiated decision.
+ * Don't use an ORM if the problem doesn't need one.
+ SQLAlchemy consists of a Core and separate ORM
+ component. The Core offers a full SQL expression
+ language that allows Pythonic construction
+ of SQL constructs that render directly to SQL
+ strings for a target database, returning
+ result sets that are essentially enhanced DBAPI
+ cursors.
+ * Transactions should be the norm. With SQLAlchemy's
+ ORM, nothing goes to permanent storage until
+ commit() is called. SQLAlchemy encourages applications
+ to create a consistent means of delineating
+ the start and end of a series of operations.
+ * Never render a literal value in a SQL statement.
+ Bound parameters are used to the greatest degree
+ possible, allowing query optimizers to cache
+ query plans effectively and making SQL injection
+ attacks a non-issue.
+
+ Documentation
+ -------------
+
+ Latest documentation is at:
+
+ http://www.sqlalchemy.org/docs/
+
+ Installation / Requirements
+ ---------------------------
+
+ Full documentation for installation is at
+ `Installation <http://docs.sqlalchemy.org/en/latest/intro.html#installation>`_.
+
+ Getting Help / Development / Bug reporting
+ ------------------------------------------
+
+ Please refer to the `SQLAlchemy Community Guide <http://www.sqlalchemy.org/support.html>`_.
+
+ License
+ -------
+
+ SQLAlchemy is distributed under the `MIT license
+ <http://www.opensource.org/licenses/mit-license.php>`_.
+
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: Jython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Database :: Front-Ends
+Classifier: Operating System :: OS Independent
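
The last philosophy point above, never rendering literal values into SQL, is visible directly in Core. A minimal sketch against an illustrative in-memory SQLite engine, using the 1.x ``select([...])`` calling form::

    from sqlalchemy import (MetaData, Table, Column, Integer, String,
                            create_engine, select)

    engine = create_engine("sqlite://")          # throwaway in-memory DB
    metadata = MetaData()
    users = Table("users", metadata,
                  Column("id", Integer, primary_key=True),
                  Column("name", String(50)))
    metadata.create_all(engine)

    conn = engine.connect()
    conn.execute(users.insert(), [{"name": "ed"}, {"name": "wendy"}])

    stmt = select([users]).where(users.c.name == "ed")
    print(stmt)   # ... WHERE users.name = :name_1 -- "ed" stays a bound param
    print(conn.execute(stmt).fetchall())
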
diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/SOURCES.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/SOURCES.txt
new file mode 100644
index 0000000..2de1f71
--- /dev/null
+++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/SOURCES.txt
@@ -0,0 +1,806 @@
+AUTHORS
+CHANGES
+LICENSE
+MANIFEST.in
+README.dialects.rst
+README.rst
+README.unittests.rst
+setup.cfg
+setup.py
+sqla_nose.py
+tox.ini
+doc/contents.html
+doc/copyright.html
+doc/genindex.html
+doc/glossary.html
+doc/index.html
+doc/intro.html
+doc/search.html
+doc/searchindex.js
+doc/_images/sqla_arch_small.png
+doc/_images/sqla_engine_arch.png
+doc/_modules/index.html
+doc/_modules/examples/adjacency_list/adjacency_list.html
+doc/_modules/examples/association/basic_association.html
+doc/_modules/examples/association/dict_of_sets_with_default.html
+doc/_modules/examples/association/proxied_association.html
+doc/_modules/examples/custom_attributes/active_column_defaults.html
+doc/_modules/examples/custom_attributes/custom_management.html
+doc/_modules/examples/custom_attributes/listen_for_events.html
+doc/_modules/examples/dogpile_caching/advanced.html
+doc/_modules/examples/dogpile_caching/caching_query.html
+doc/_modules/examples/dogpile_caching/environment.html
+doc/_modules/examples/dogpile_caching/fixture_data.html
+doc/_modules/examples/dogpile_caching/helloworld.html
+doc/_modules/examples/dogpile_caching/local_session_caching.html
+doc/_modules/examples/dogpile_caching/model.html
+doc/_modules/examples/dogpile_caching/relationship_caching.html
+doc/_modules/examples/dynamic_dict/dynamic_dict.html
+doc/_modules/examples/elementtree/adjacency_list.html
+doc/_modules/examples/elementtree/optimized_al.html
+doc/_modules/examples/elementtree/pickle.html
+doc/_modules/examples/generic_associations/discriminator_on_association.html
+doc/_modules/examples/generic_associations/generic_fk.html
+doc/_modules/examples/generic_associations/table_per_association.html
+doc/_modules/examples/generic_associations/table_per_related.html
+doc/_modules/examples/graphs/directed_graph.html
+doc/_modules/examples/inheritance/concrete.html
+doc/_modules/examples/inheritance/joined.html
+doc/_modules/examples/inheritance/single.html
+doc/_modules/examples/join_conditions/cast.html
+doc/_modules/examples/join_conditions/threeway.html
+doc/_modules/examples/large_collection/large_collection.html
+doc/_modules/examples/materialized_paths/materialized_paths.html
+doc/_modules/examples/nested_sets/nested_sets.html
+doc/_modules/examples/performance/__main__.html
+doc/_modules/examples/performance/bulk_inserts.html
+doc/_modules/examples/performance/bulk_updates.html
+doc/_modules/examples/performance/large_resultsets.html
+doc/_modules/examples/performance/short_selects.html
+doc/_modules/examples/performance/single_inserts.html
+doc/_modules/examples/postgis/postgis.html
+doc/_modules/examples/sharding/attribute_shard.html
+doc/_modules/examples/versioned_history/history_meta.html
+doc/_modules/examples/versioned_history/test_versioning.html
+doc/_modules/examples/versioned_rows/versioned_map.html
+doc/_modules/examples/versioned_rows/versioned_rows.html
+doc/_modules/examples/vertical/dictlike-polymorphic.html
+doc/_modules/examples/vertical/dictlike.html
+doc/_static/basic.css
+doc/_static/changelog.css
+doc/_static/comment-bright.png
+doc/_static/comment-close.png
+doc/_static/comment.png
+doc/_static/detectmobile.js
+doc/_static/docs.css
+doc/_static/doctools.js
+doc/_static/down-pressed.png
+doc/_static/down.png
+doc/_static/file.png
+doc/_static/init.js
+doc/_static/jquery-3.1.0.js
+doc/_static/jquery.js
+doc/_static/minus.png
+doc/_static/plus.png
+doc/_static/pygments.css
+doc/_static/searchtools.js
+doc/_static/sphinx_paramlinks.css
+doc/_static/underscore-1.3.1.js
+doc/_static/underscore.js
+doc/_static/up-pressed.png
+doc/_static/up.png
+doc/_static/websupport.js
+doc/build/Makefile
+doc/build/conf.py
+doc/build/contents.rst
+doc/build/copyright.rst
+doc/build/corrections.py
+doc/build/glossary.rst
+doc/build/index.rst
+doc/build/intro.rst
+doc/build/requirements.txt
+doc/build/sqla_arch_small.png
+doc/build/changelog/changelog_01.rst
+doc/build/changelog/changelog_02.rst
+doc/build/changelog/changelog_03.rst
+doc/build/changelog/changelog_04.rst
+doc/build/changelog/changelog_05.rst
+doc/build/changelog/changelog_06.rst
+doc/build/changelog/changelog_07.rst
+doc/build/changelog/changelog_08.rst
+doc/build/changelog/changelog_09.rst
+doc/build/changelog/changelog_10.rst
+doc/build/changelog/changelog_11.rst
+doc/build/changelog/index.rst
+doc/build/changelog/migration_04.rst
+doc/build/changelog/migration_05.rst
+doc/build/changelog/migration_06.rst
+doc/build/changelog/migration_07.rst
+doc/build/changelog/migration_08.rst
+doc/build/changelog/migration_09.rst
+doc/build/changelog/migration_10.rst
+doc/build/changelog/migration_11.rst
+doc/build/core/api_basics.rst
+doc/build/core/compiler.rst
+doc/build/core/connections.rst
+doc/build/core/constraints.rst
+doc/build/core/custom_types.rst
+doc/build/core/ddl.rst
+doc/build/core/defaults.rst
+doc/build/core/dml.rst
+doc/build/core/engines.rst
+doc/build/core/engines_connections.rst
+doc/build/core/event.rst
+doc/build/core/events.rst
+doc/build/core/exceptions.rst
+doc/build/core/expression_api.rst
+doc/build/core/functions.rst
+doc/build/core/index.rst
+doc/build/core/inspection.rst
+doc/build/core/interfaces.rst
+doc/build/core/internals.rst
+doc/build/core/metadata.rst
+doc/build/core/pooling.rst
+doc/build/core/reflection.rst
+doc/build/core/schema.rst
+doc/build/core/selectable.rst
+doc/build/core/serializer.rst
+doc/build/core/sqla_engine_arch.png
+doc/build/core/sqlelement.rst
+doc/build/core/tutorial.rst
+doc/build/core/type_api.rst
+doc/build/core/type_basics.rst
+doc/build/core/types.rst
+doc/build/dialects/firebird.rst
+doc/build/dialects/index.rst
+doc/build/dialects/mssql.rst
+doc/build/dialects/mysql.rst
+doc/build/dialects/oracle.rst
+doc/build/dialects/postgresql.rst
+doc/build/dialects/sqlite.rst
+doc/build/dialects/sybase.rst
+doc/build/faq/connections.rst
+doc/build/faq/index.rst
+doc/build/faq/metadata_schema.rst
+doc/build/faq/ormconfiguration.rst
+doc/build/faq/performance.rst
+doc/build/faq/sessions.rst
+doc/build/faq/sqlexpressions.rst
+doc/build/orm/backref.rst
+doc/build/orm/basic_relationships.rst
+doc/build/orm/cascades.rst
+doc/build/orm/classical.rst
+doc/build/orm/collections.rst
+doc/build/orm/composites.rst
+doc/build/orm/constructors.rst
+doc/build/orm/contextual.rst
+doc/build/orm/deprecated.rst
+doc/build/orm/events.rst
+doc/build/orm/examples.rst
+doc/build/orm/exceptions.rst
+doc/build/orm/extending.rst
+doc/build/orm/index.rst
+doc/build/orm/inheritance.rst
+doc/build/orm/internals.rst
+doc/build/orm/join_conditions.rst
+doc/build/orm/loading.rst
+doc/build/orm/loading_columns.rst
+doc/build/orm/loading_objects.rst
+doc/build/orm/loading_relationships.rst
+doc/build/orm/mapped_attributes.rst
+doc/build/orm/mapped_sql_expr.rst
+doc/build/orm/mapper_config.rst
+doc/build/orm/mapping_api.rst
+doc/build/orm/mapping_columns.rst
+doc/build/orm/mapping_styles.rst
+doc/build/orm/nonstandard_mappings.rst
+doc/build/orm/persistence_techniques.rst
+doc/build/orm/query.rst
+doc/build/orm/relationship_api.rst
+doc/build/orm/relationship_persistence.rst
+doc/build/orm/relationships.rst
+doc/build/orm/scalar_mapping.rst
+doc/build/orm/self_referential.rst
+doc/build/orm/session.rst
+doc/build/orm/session_api.rst
+doc/build/orm/session_basics.rst
+doc/build/orm/session_events.rst
+doc/build/orm/session_state_management.rst
+doc/build/orm/session_transaction.rst
+doc/build/orm/tutorial.rst
+doc/build/orm/versioning.rst
+doc/build/orm/extensions/associationproxy.rst
+doc/build/orm/extensions/automap.rst
+doc/build/orm/extensions/baked.rst
+doc/build/orm/extensions/horizontal_shard.rst
+doc/build/orm/extensions/hybrid.rst
+doc/build/orm/extensions/index.rst
+doc/build/orm/extensions/indexable.rst
+doc/build/orm/extensions/instrumentation.rst
+doc/build/orm/extensions/mutable.rst
+doc/build/orm/extensions/orderinglist.rst
+doc/build/orm/extensions/declarative/api.rst
+doc/build/orm/extensions/declarative/basic_use.rst
+doc/build/orm/extensions/declarative/index.rst
+doc/build/orm/extensions/declarative/inheritance.rst
+doc/build/orm/extensions/declarative/mixins.rst
+doc/build/orm/extensions/declarative/relationships.rst
+doc/build/orm/extensions/declarative/table_config.rst
+doc/build/texinputs/Makefile
+doc/build/texinputs/sphinx.sty
+doc/changelog/changelog_01.html
+doc/changelog/changelog_02.html
+doc/changelog/changelog_03.html
+doc/changelog/changelog_04.html
+doc/changelog/changelog_05.html
+doc/changelog/changelog_06.html
+doc/changelog/changelog_07.html
+doc/changelog/changelog_08.html
+doc/changelog/changelog_09.html
+doc/changelog/changelog_10.html
+doc/changelog/changelog_11.html
+doc/changelog/index.html
+doc/changelog/migration_04.html
+doc/changelog/migration_05.html
+doc/changelog/migration_06.html
+doc/changelog/migration_07.html
+doc/changelog/migration_08.html
+doc/changelog/migration_09.html
+doc/changelog/migration_10.html
+doc/changelog/migration_11.html
+doc/core/api_basics.html
+doc/core/compiler.html
+doc/core/connections.html
+doc/core/constraints.html
+doc/core/custom_types.html
+doc/core/ddl.html
+doc/core/defaults.html
+doc/core/dml.html
+doc/core/engines.html
+doc/core/engines_connections.html
+doc/core/event.html
+doc/core/events.html
+doc/core/exceptions.html
+doc/core/expression_api.html
+doc/core/functions.html
+doc/core/index.html
+doc/core/inspection.html
+doc/core/interfaces.html
+doc/core/internals.html
+doc/core/metadata.html
+doc/core/pooling.html
+doc/core/reflection.html
+doc/core/schema.html
+doc/core/selectable.html
+doc/core/serializer.html
+doc/core/sqlelement.html
+doc/core/tutorial.html
+doc/core/type_api.html
+doc/core/type_basics.html
+doc/core/types.html
+doc/dialects/firebird.html
+doc/dialects/index.html
+doc/dialects/mssql.html
+doc/dialects/mysql.html
+doc/dialects/oracle.html
+doc/dialects/postgresql.html
+doc/dialects/sqlite.html
+doc/dialects/sybase.html
+doc/faq/connections.html
+doc/faq/index.html
+doc/faq/metadata_schema.html
+doc/faq/ormconfiguration.html
+doc/faq/performance.html
+doc/faq/sessions.html
+doc/faq/sqlexpressions.html
+doc/orm/backref.html
+doc/orm/basic_relationships.html
+doc/orm/cascades.html
+doc/orm/classical.html
+doc/orm/collections.html
+doc/orm/composites.html
+doc/orm/constructors.html
+doc/orm/contextual.html
+doc/orm/deprecated.html
+doc/orm/events.html
+doc/orm/examples.html
+doc/orm/exceptions.html
+doc/orm/extending.html
+doc/orm/index.html
+doc/orm/inheritance.html
+doc/orm/internals.html
+doc/orm/join_conditions.html
+doc/orm/loading.html
+doc/orm/loading_columns.html
+doc/orm/loading_objects.html
+doc/orm/loading_relationships.html
+doc/orm/mapped_attributes.html
+doc/orm/mapped_sql_expr.html
+doc/orm/mapper_config.html
+doc/orm/mapping_api.html
+doc/orm/mapping_columns.html
+doc/orm/mapping_styles.html
+doc/orm/nonstandard_mappings.html
+doc/orm/persistence_techniques.html
+doc/orm/query.html
+doc/orm/relationship_api.html
+doc/orm/relationship_persistence.html
+doc/orm/relationships.html
+doc/orm/scalar_mapping.html
+doc/orm/self_referential.html
+doc/orm/session.html
+doc/orm/session_api.html
+doc/orm/session_basics.html
+doc/orm/session_events.html
+doc/orm/session_state_management.html
+doc/orm/session_transaction.html
+doc/orm/tutorial.html
+doc/orm/versioning.html
+doc/orm/extensions/associationproxy.html
+doc/orm/extensions/automap.html
+doc/orm/extensions/baked.html
+doc/orm/extensions/horizontal_shard.html
+doc/orm/extensions/hybrid.html
+doc/orm/extensions/index.html
+doc/orm/extensions/indexable.html
+doc/orm/extensions/instrumentation.html
+doc/orm/extensions/mutable.html
+doc/orm/extensions/orderinglist.html
+doc/orm/extensions/declarative/api.html
+doc/orm/extensions/declarative/basic_use.html
+doc/orm/extensions/declarative/index.html
+doc/orm/extensions/declarative/inheritance.html
+doc/orm/extensions/declarative/mixins.html
+doc/orm/extensions/declarative/relationships.html
+doc/orm/extensions/declarative/table_config.html
+examples/__init__.py
+examples/adjacency_list/__init__.py
+examples/adjacency_list/adjacency_list.py
+examples/association/__init__.py
+examples/association/basic_association.py
+examples/association/dict_of_sets_with_default.py
+examples/association/proxied_association.py
+examples/custom_attributes/__init__.py
+examples/custom_attributes/active_column_defaults.py
+examples/custom_attributes/custom_management.py
+examples/custom_attributes/listen_for_events.py
+examples/dogpile_caching/__init__.py
+examples/dogpile_caching/advanced.py
+examples/dogpile_caching/caching_query.py
+examples/dogpile_caching/environment.py
+examples/dogpile_caching/fixture_data.py
+examples/dogpile_caching/helloworld.py
+examples/dogpile_caching/local_session_caching.py
+examples/dogpile_caching/model.py
+examples/dogpile_caching/relationship_caching.py
+examples/dynamic_dict/__init__.py
+examples/dynamic_dict/dynamic_dict.py
+examples/elementtree/__init__.py
+examples/elementtree/adjacency_list.py
+examples/elementtree/optimized_al.py
+examples/elementtree/pickle.py
+examples/elementtree/test.xml
+examples/elementtree/test2.xml
+examples/elementtree/test3.xml
+examples/generic_associations/__init__.py
+examples/generic_associations/discriminator_on_association.py
+examples/generic_associations/generic_fk.py
+examples/generic_associations/table_per_association.py
+examples/generic_associations/table_per_related.py
+examples/graphs/__init__.py
+examples/graphs/directed_graph.py
+examples/inheritance/__init__.py
+examples/inheritance/concrete.py
+examples/inheritance/joined.py
+examples/inheritance/single.py
+examples/join_conditions/__init__.py
+examples/join_conditions/cast.py
+examples/join_conditions/threeway.py
+examples/large_collection/__init__.py
+examples/large_collection/large_collection.py
+examples/materialized_paths/__init__.py
+examples/materialized_paths/materialized_paths.py
+examples/nested_sets/__init__.py
+examples/nested_sets/nested_sets.py
+examples/performance/__init__.py
+examples/performance/__main__.py
+examples/performance/bulk_inserts.py
+examples/performance/bulk_updates.py
+examples/performance/large_resultsets.py
+examples/performance/short_selects.py
+examples/performance/single_inserts.py
+examples/postgis/__init__.py
+examples/postgis/postgis.py
+examples/sharding/__init__.py
+examples/sharding/attribute_shard.py
+examples/versioned_history/__init__.py
+examples/versioned_history/history_meta.py
+examples/versioned_history/test_versioning.py
+examples/versioned_rows/__init__.py
+examples/versioned_rows/versioned_map.py
+examples/versioned_rows/versioned_rows.py
+examples/vertical/__init__.py
+examples/vertical/dictlike-polymorphic.py
+examples/vertical/dictlike.py
+lib/SQLAlchemy.egg-info/PKG-INFO
+lib/SQLAlchemy.egg-info/SOURCES.txt
+lib/SQLAlchemy.egg-info/dependency_links.txt
+lib/SQLAlchemy.egg-info/requires.txt
+lib/SQLAlchemy.egg-info/top_level.txt
+lib/sqlalchemy/__init__.py
+lib/sqlalchemy/events.py
+lib/sqlalchemy/exc.py
+lib/sqlalchemy/inspection.py
+lib/sqlalchemy/interfaces.py
+lib/sqlalchemy/log.py
+lib/sqlalchemy/pool.py
+lib/sqlalchemy/processors.py
+lib/sqlalchemy/schema.py
+lib/sqlalchemy/types.py
+lib/sqlalchemy/cextension/processors.c
+lib/sqlalchemy/cextension/resultproxy.c
+lib/sqlalchemy/cextension/utils.c
+lib/sqlalchemy/connectors/__init__.py
+lib/sqlalchemy/connectors/mxodbc.py
+lib/sqlalchemy/connectors/pyodbc.py
+lib/sqlalchemy/connectors/zxJDBC.py
+lib/sqlalchemy/databases/__init__.py
+lib/sqlalchemy/dialects/__init__.py
+lib/sqlalchemy/dialects/type_migration_guidelines.txt
+lib/sqlalchemy/dialects/firebird/__init__.py
+lib/sqlalchemy/dialects/firebird/base.py
+lib/sqlalchemy/dialects/firebird/fdb.py
+lib/sqlalchemy/dialects/firebird/kinterbasdb.py
+lib/sqlalchemy/dialects/mssql/__init__.py
+lib/sqlalchemy/dialects/mssql/adodbapi.py
+lib/sqlalchemy/dialects/mssql/base.py
+lib/sqlalchemy/dialects/mssql/information_schema.py
+lib/sqlalchemy/dialects/mssql/mxodbc.py
+lib/sqlalchemy/dialects/mssql/pymssql.py
+lib/sqlalchemy/dialects/mssql/pyodbc.py
+lib/sqlalchemy/dialects/mssql/zxjdbc.py
+lib/sqlalchemy/dialects/mysql/__init__.py
+lib/sqlalchemy/dialects/mysql/base.py
+lib/sqlalchemy/dialects/mysql/cymysql.py
+lib/sqlalchemy/dialects/mysql/enumerated.py
+lib/sqlalchemy/dialects/mysql/gaerdbms.py
+lib/sqlalchemy/dialects/mysql/json.py
+lib/sqlalchemy/dialects/mysql/mysqlconnector.py
+lib/sqlalchemy/dialects/mysql/mysqldb.py
+lib/sqlalchemy/dialects/mysql/oursql.py
+lib/sqlalchemy/dialects/mysql/pymysql.py
+lib/sqlalchemy/dialects/mysql/pyodbc.py
+lib/sqlalchemy/dialects/mysql/reflection.py
+lib/sqlalchemy/dialects/mysql/types.py
+lib/sqlalchemy/dialects/mysql/zxjdbc.py
+lib/sqlalchemy/dialects/oracle/__init__.py
+lib/sqlalchemy/dialects/oracle/base.py
+lib/sqlalchemy/dialects/oracle/cx_oracle.py
+lib/sqlalchemy/dialects/oracle/zxjdbc.py
+lib/sqlalchemy/dialects/postgresql/__init__.py
+lib/sqlalchemy/dialects/postgresql/array.py
+lib/sqlalchemy/dialects/postgresql/base.py
+lib/sqlalchemy/dialects/postgresql/dml.py
+lib/sqlalchemy/dialects/postgresql/ext.py
+lib/sqlalchemy/dialects/postgresql/hstore.py
+lib/sqlalchemy/dialects/postgresql/json.py
+lib/sqlalchemy/dialects/postgresql/pg8000.py
+lib/sqlalchemy/dialects/postgresql/psycopg2.py
+lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
+lib/sqlalchemy/dialects/postgresql/pygresql.py
+lib/sqlalchemy/dialects/postgresql/pypostgresql.py
+lib/sqlalchemy/dialects/postgresql/ranges.py
+lib/sqlalchemy/dialects/postgresql/zxjdbc.py
+lib/sqlalchemy/dialects/sqlite/__init__.py
+lib/sqlalchemy/dialects/sqlite/base.py
+lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
+lib/sqlalchemy/dialects/sqlite/pysqlite.py
+lib/sqlalchemy/dialects/sybase/__init__.py
+lib/sqlalchemy/dialects/sybase/base.py
+lib/sqlalchemy/dialects/sybase/mxodbc.py
+lib/sqlalchemy/dialects/sybase/pyodbc.py
+lib/sqlalchemy/dialects/sybase/pysybase.py
+lib/sqlalchemy/engine/__init__.py
+lib/sqlalchemy/engine/base.py
+lib/sqlalchemy/engine/default.py
+lib/sqlalchemy/engine/interfaces.py
+lib/sqlalchemy/engine/reflection.py
+lib/sqlalchemy/engine/result.py
+lib/sqlalchemy/engine/strategies.py
+lib/sqlalchemy/engine/threadlocal.py
+lib/sqlalchemy/engine/url.py
+lib/sqlalchemy/engine/util.py
+lib/sqlalchemy/event/__init__.py
+lib/sqlalchemy/event/api.py
+lib/sqlalchemy/event/attr.py
+lib/sqlalchemy/event/base.py
+lib/sqlalchemy/event/legacy.py
+lib/sqlalchemy/event/registry.py
+lib/sqlalchemy/ext/__init__.py
+lib/sqlalchemy/ext/associationproxy.py
+lib/sqlalchemy/ext/automap.py
+lib/sqlalchemy/ext/baked.py
+lib/sqlalchemy/ext/compiler.py
+lib/sqlalchemy/ext/horizontal_shard.py
+lib/sqlalchemy/ext/hybrid.py
+lib/sqlalchemy/ext/indexable.py
+lib/sqlalchemy/ext/instrumentation.py
+lib/sqlalchemy/ext/mutable.py
+lib/sqlalchemy/ext/orderinglist.py
+lib/sqlalchemy/ext/serializer.py
+lib/sqlalchemy/ext/declarative/__init__.py
+lib/sqlalchemy/ext/declarative/api.py
+lib/sqlalchemy/ext/declarative/base.py
+lib/sqlalchemy/ext/declarative/clsregistry.py
+lib/sqlalchemy/orm/__init__.py
+lib/sqlalchemy/orm/attributes.py
+lib/sqlalchemy/orm/base.py
+lib/sqlalchemy/orm/collections.py
+lib/sqlalchemy/orm/dependency.py
+lib/sqlalchemy/orm/deprecated_interfaces.py
+lib/sqlalchemy/orm/descriptor_props.py
+lib/sqlalchemy/orm/dynamic.py
+lib/sqlalchemy/orm/evaluator.py
+lib/sqlalchemy/orm/events.py
+lib/sqlalchemy/orm/exc.py
+lib/sqlalchemy/orm/identity.py
+lib/sqlalchemy/orm/instrumentation.py
+lib/sqlalchemy/orm/interfaces.py
+lib/sqlalchemy/orm/loading.py
+lib/sqlalchemy/orm/mapper.py
+lib/sqlalchemy/orm/path_registry.py
+lib/sqlalchemy/orm/persistence.py
+lib/sqlalchemy/orm/properties.py
+lib/sqlalchemy/orm/query.py
+lib/sqlalchemy/orm/relationships.py
+lib/sqlalchemy/orm/scoping.py
+lib/sqlalchemy/orm/session.py
+lib/sqlalchemy/orm/state.py
+lib/sqlalchemy/orm/strategies.py
+lib/sqlalchemy/orm/strategy_options.py
+lib/sqlalchemy/orm/sync.py
+lib/sqlalchemy/orm/unitofwork.py
+lib/sqlalchemy/orm/util.py
+lib/sqlalchemy/sql/__init__.py
+lib/sqlalchemy/sql/annotation.py
+lib/sqlalchemy/sql/base.py
+lib/sqlalchemy/sql/compiler.py
+lib/sqlalchemy/sql/crud.py
+lib/sqlalchemy/sql/ddl.py
+lib/sqlalchemy/sql/default_comparator.py
+lib/sqlalchemy/sql/dml.py
+lib/sqlalchemy/sql/elements.py
+lib/sqlalchemy/sql/expression.py
+lib/sqlalchemy/sql/functions.py
+lib/sqlalchemy/sql/naming.py
+lib/sqlalchemy/sql/operators.py
+lib/sqlalchemy/sql/schema.py
+lib/sqlalchemy/sql/selectable.py
+lib/sqlalchemy/sql/sqltypes.py
+lib/sqlalchemy/sql/type_api.py
+lib/sqlalchemy/sql/util.py
+lib/sqlalchemy/sql/visitors.py
+lib/sqlalchemy/testing/__init__.py
+lib/sqlalchemy/testing/assertions.py
+lib/sqlalchemy/testing/assertsql.py
+lib/sqlalchemy/testing/config.py
+lib/sqlalchemy/testing/engines.py
+lib/sqlalchemy/testing/entities.py
+lib/sqlalchemy/testing/exclusions.py
+lib/sqlalchemy/testing/fixtures.py
+lib/sqlalchemy/testing/mock.py
+lib/sqlalchemy/testing/pickleable.py
+lib/sqlalchemy/testing/profiling.py
+lib/sqlalchemy/testing/provision.py
+lib/sqlalchemy/testing/replay_fixture.py
+lib/sqlalchemy/testing/requirements.py
+lib/sqlalchemy/testing/runner.py
+lib/sqlalchemy/testing/schema.py
+lib/sqlalchemy/testing/util.py
+lib/sqlalchemy/testing/warnings.py
+lib/sqlalchemy/testing/plugin/__init__.py
+lib/sqlalchemy/testing/plugin/bootstrap.py
+lib/sqlalchemy/testing/plugin/noseplugin.py
+lib/sqlalchemy/testing/plugin/plugin_base.py
+lib/sqlalchemy/testing/plugin/pytestplugin.py
+lib/sqlalchemy/testing/suite/__init__.py
+lib/sqlalchemy/testing/suite/test_ddl.py
+lib/sqlalchemy/testing/suite/test_dialect.py
+lib/sqlalchemy/testing/suite/test_insert.py
+lib/sqlalchemy/testing/suite/test_reflection.py
+lib/sqlalchemy/testing/suite/test_results.py
+lib/sqlalchemy/testing/suite/test_select.py
+lib/sqlalchemy/testing/suite/test_sequence.py
+lib/sqlalchemy/testing/suite/test_types.py
+lib/sqlalchemy/testing/suite/test_update_delete.py
+lib/sqlalchemy/util/__init__.py
+lib/sqlalchemy/util/_collections.py
+lib/sqlalchemy/util/compat.py
+lib/sqlalchemy/util/deprecations.py
+lib/sqlalchemy/util/langhelpers.py
+lib/sqlalchemy/util/queue.py
+lib/sqlalchemy/util/topological.py
+test/__init__.py
+test/binary_data_one.dat
+test/binary_data_two.dat
+test/conftest.py
+test/requirements.py
+test/aaa_profiling/__init__.py
+test/aaa_profiling/test_compiler.py
+test/aaa_profiling/test_memusage.py
+test/aaa_profiling/test_orm.py
+test/aaa_profiling/test_pool.py
+test/aaa_profiling/test_resultset.py
+test/aaa_profiling/test_zoomark.py
+test/aaa_profiling/test_zoomark_orm.py
+test/base/__init__.py
+test/base/test_dependency.py
+test/base/test_events.py
+test/base/test_except.py
+test/base/test_inspect.py
+test/base/test_tutorials.py
+test/base/test_utils.py
+test/dialect/__init__.py
+test/dialect/test_firebird.py
+test/dialect/test_mxodbc.py
+test/dialect/test_oracle.py
+test/dialect/test_pyodbc.py
+test/dialect/test_sqlite.py
+test/dialect/test_suite.py
+test/dialect/test_sybase.py
+test/dialect/mssql/__init__.py
+test/dialect/mssql/test_compiler.py
+test/dialect/mssql/test_engine.py
+test/dialect/mssql/test_query.py
+test/dialect/mssql/test_reflection.py
+test/dialect/mssql/test_types.py
+test/dialect/mysql/__init__.py
+test/dialect/mysql/test_compiler.py
+test/dialect/mysql/test_dialect.py
+test/dialect/mysql/test_query.py
+test/dialect/mysql/test_reflection.py
+test/dialect/mysql/test_types.py
+test/dialect/postgresql/__init__.py
+test/dialect/postgresql/test_compiler.py
+test/dialect/postgresql/test_dialect.py
+test/dialect/postgresql/test_on_conflict.py
+test/dialect/postgresql/test_query.py
+test/dialect/postgresql/test_reflection.py
+test/dialect/postgresql/test_types.py
+test/engine/__init__.py
+test/engine/test_bind.py
+test/engine/test_ddlevents.py
+test/engine/test_execute.py
+test/engine/test_logging.py
+test/engine/test_parseconnect.py
+test/engine/test_pool.py
+test/engine/test_processors.py
+test/engine/test_reconnect.py
+test/engine/test_reflection.py
+test/engine/test_transaction.py
+test/ext/__init__.py
+test/ext/test_associationproxy.py
+test/ext/test_automap.py
+test/ext/test_baked.py
+test/ext/test_compiler.py
+test/ext/test_extendedattr.py
+test/ext/test_horizontal_shard.py
+test/ext/test_hybrid.py
+test/ext/test_indexable.py
+test/ext/test_mutable.py
+test/ext/test_orderinglist.py
+test/ext/test_serializer.py
+test/ext/declarative/__init__.py
+test/ext/declarative/test_basic.py
+test/ext/declarative/test_clsregistry.py
+test/ext/declarative/test_inheritance.py
+test/ext/declarative/test_mixin.py
+test/ext/declarative/test_reflection.py
+test/orm/__init__.py
+test/orm/_fixtures.py
+test/orm/test_association.py
+test/orm/test_assorted_eager.py
+test/orm/test_attributes.py
+test/orm/test_backref_mutations.py
+test/orm/test_bind.py
+test/orm/test_bulk.py
+test/orm/test_bundle.py
+test/orm/test_cascade.py
+test/orm/test_collection.py
+test/orm/test_compile.py
+test/orm/test_composites.py
+test/orm/test_cycles.py
+test/orm/test_default_strategies.py
+test/orm/test_defaults.py
+test/orm/test_deferred.py
+test/orm/test_deprecations.py
+test/orm/test_descriptor.py
+test/orm/test_dynamic.py
+test/orm/test_eager_relations.py
+test/orm/test_evaluator.py
+test/orm/test_events.py
+test/orm/test_expire.py
+test/orm/test_froms.py
+test/orm/test_generative.py
+test/orm/test_hasparent.py
+test/orm/test_immediate_load.py
+test/orm/test_inspect.py
+test/orm/test_instrumentation.py
+test/orm/test_joins.py
+test/orm/test_lazy_relations.py
+test/orm/test_load_on_fks.py
+test/orm/test_loading.py
+test/orm/test_lockmode.py
+test/orm/test_manytomany.py
+test/orm/test_mapper.py
+test/orm/test_merge.py
+test/orm/test_naturalpks.py
+test/orm/test_of_type.py
+test/orm/test_onetoone.py
+test/orm/test_options.py
+test/orm/test_pickled.py
+test/orm/test_query.py
+test/orm/test_rel_fn.py
+test/orm/test_relationships.py
+test/orm/test_scoping.py
+test/orm/test_selectable.py
+test/orm/test_session.py
+test/orm/test_subquery_relations.py
+test/orm/test_sync.py
+test/orm/test_transaction.py
+test/orm/test_unitofwork.py
+test/orm/test_unitofworkv2.py
+test/orm/test_update_delete.py
+test/orm/test_utils.py
+test/orm/test_validators.py
+test/orm/test_versioning.py
+test/orm/inheritance/__init__.py
+test/orm/inheritance/_poly_fixtures.py
+test/orm/inheritance/test_abc_inheritance.py
+test/orm/inheritance/test_abc_polymorphic.py
+test/orm/inheritance/test_assorted_poly.py
+test/orm/inheritance/test_basic.py
+test/orm/inheritance/test_concrete.py
+test/orm/inheritance/test_magazine.py
+test/orm/inheritance/test_manytomany.py
+test/orm/inheritance/test_poly_linked_list.py
+test/orm/inheritance/test_poly_persistence.py
+test/orm/inheritance/test_polymorphic_rel.py
+test/orm/inheritance/test_productspec.py
+test/orm/inheritance/test_relationship.py
+test/orm/inheritance/test_selects.py
+test/orm/inheritance/test_single.py
+test/orm/inheritance/test_with_poly.py
+test/perf/invalidate_stresstest.py
+test/perf/orm2010.py
+test/sql/__init__.py
+test/sql/test_case_statement.py
+test/sql/test_compiler.py
+test/sql/test_constraints.py
+test/sql/test_cte.py
+test/sql/test_ddlemit.py
+test/sql/test_defaults.py
+test/sql/test_delete.py
+test/sql/test_functions.py
+test/sql/test_generative.py
+test/sql/test_insert.py
+test/sql/test_insert_exec.py
+test/sql/test_inspect.py
+test/sql/test_join_rewriting.py
+test/sql/test_labels.py
+test/sql/test_lateral.py
+test/sql/test_metadata.py
+test/sql/test_operators.py
+test/sql/test_query.py
+test/sql/test_quote.py
+test/sql/test_resultset.py
+test/sql/test_returning.py
+test/sql/test_rowcount.py
+test/sql/test_selectable.py
+test/sql/test_tablesample.py
+test/sql/test_text.py
+test/sql/test_type_expressions.py
+test/sql/test_types.py
+test/sql/test_unicode.py
+test/sql/test_update.py
+test/sql/test_utils.py
\ No newline at end of file
diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/dependency_links.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/installed-files.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/installed-files.txt
new file mode 100644
index 0000000..d2681e6
--- /dev/null
+++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/installed-files.txt
@@ -0,0 +1,387 @@
+../sqlalchemy/__init__.py
+../sqlalchemy/events.py
+../sqlalchemy/exc.py
+../sqlalchemy/inspection.py
+../sqlalchemy/interfaces.py
+../sqlalchemy/log.py
+../sqlalchemy/pool.py
+../sqlalchemy/processors.py
+../sqlalchemy/schema.py
+../sqlalchemy/types.py
+../sqlalchemy/connectors/__init__.py
+../sqlalchemy/connectors/mxodbc.py
+../sqlalchemy/connectors/pyodbc.py
+../sqlalchemy/connectors/zxJDBC.py
+../sqlalchemy/databases/__init__.py
+../sqlalchemy/dialects/__init__.py
+../sqlalchemy/engine/__init__.py
+../sqlalchemy/engine/base.py
+../sqlalchemy/engine/default.py
+../sqlalchemy/engine/interfaces.py
+../sqlalchemy/engine/reflection.py
+../sqlalchemy/engine/result.py
+../sqlalchemy/engine/strategies.py
+../sqlalchemy/engine/threadlocal.py
+../sqlalchemy/engine/url.py
+../sqlalchemy/engine/util.py
+../sqlalchemy/event/__init__.py
+../sqlalchemy/event/api.py
+../sqlalchemy/event/attr.py
+../sqlalchemy/event/base.py
+../sqlalchemy/event/legacy.py
+../sqlalchemy/event/registry.py
+../sqlalchemy/ext/__init__.py
+../sqlalchemy/ext/associationproxy.py
+../sqlalchemy/ext/automap.py
+../sqlalchemy/ext/baked.py
+../sqlalchemy/ext/compiler.py
+../sqlalchemy/ext/horizontal_shard.py
+../sqlalchemy/ext/hybrid.py
+../sqlalchemy/ext/indexable.py
+../sqlalchemy/ext/instrumentation.py
+../sqlalchemy/ext/mutable.py
+../sqlalchemy/ext/orderinglist.py
+../sqlalchemy/ext/serializer.py
+../sqlalchemy/orm/__init__.py
+../sqlalchemy/orm/attributes.py
+../sqlalchemy/orm/base.py
+../sqlalchemy/orm/collections.py
+../sqlalchemy/orm/dependency.py
+../sqlalchemy/orm/deprecated_interfaces.py
+../sqlalchemy/orm/descriptor_props.py
+../sqlalchemy/orm/dynamic.py
+../sqlalchemy/orm/evaluator.py
+../sqlalchemy/orm/events.py
+../sqlalchemy/orm/exc.py
+../sqlalchemy/orm/identity.py
+../sqlalchemy/orm/instrumentation.py
+../sqlalchemy/orm/interfaces.py
+../sqlalchemy/orm/loading.py
+../sqlalchemy/orm/mapper.py
+../sqlalchemy/orm/path_registry.py
+../sqlalchemy/orm/persistence.py
+../sqlalchemy/orm/properties.py
+../sqlalchemy/orm/query.py
+../sqlalchemy/orm/relationships.py
+../sqlalchemy/orm/scoping.py
+../sqlalchemy/orm/session.py
+../sqlalchemy/orm/state.py
+../sqlalchemy/orm/strategies.py
+../sqlalchemy/orm/strategy_options.py
+../sqlalchemy/orm/sync.py
+../sqlalchemy/orm/unitofwork.py
+../sqlalchemy/orm/util.py
+../sqlalchemy/sql/__init__.py
+../sqlalchemy/sql/annotation.py
+../sqlalchemy/sql/base.py
+../sqlalchemy/sql/compiler.py
+../sqlalchemy/sql/crud.py
+../sqlalchemy/sql/ddl.py
+../sqlalchemy/sql/default_comparator.py
+../sqlalchemy/sql/dml.py
+../sqlalchemy/sql/elements.py
+../sqlalchemy/sql/expression.py
+../sqlalchemy/sql/functions.py
+../sqlalchemy/sql/naming.py
+../sqlalchemy/sql/operators.py
+../sqlalchemy/sql/schema.py
+../sqlalchemy/sql/selectable.py
+../sqlalchemy/sql/sqltypes.py
+../sqlalchemy/sql/type_api.py
+../sqlalchemy/sql/util.py
+../sqlalchemy/sql/visitors.py
+../sqlalchemy/testing/__init__.py
+../sqlalchemy/testing/assertions.py
+../sqlalchemy/testing/assertsql.py
+../sqlalchemy/testing/config.py
+../sqlalchemy/testing/engines.py
+../sqlalchemy/testing/entities.py
+../sqlalchemy/testing/exclusions.py
+../sqlalchemy/testing/fixtures.py
+../sqlalchemy/testing/mock.py
+../sqlalchemy/testing/pickleable.py
+../sqlalchemy/testing/profiling.py
+../sqlalchemy/testing/provision.py
+../sqlalchemy/testing/replay_fixture.py
+../sqlalchemy/testing/requirements.py
+../sqlalchemy/testing/runner.py
+../sqlalchemy/testing/schema.py
+../sqlalchemy/testing/util.py
+../sqlalchemy/testing/warnings.py
+../sqlalchemy/util/__init__.py
+../sqlalchemy/util/_collections.py
+../sqlalchemy/util/compat.py
+../sqlalchemy/util/deprecations.py
+../sqlalchemy/util/langhelpers.py
+../sqlalchemy/util/queue.py
+../sqlalchemy/util/topological.py
+../sqlalchemy/dialects/firebird/__init__.py
+../sqlalchemy/dialects/firebird/base.py
+../sqlalchemy/dialects/firebird/fdb.py
+../sqlalchemy/dialects/firebird/kinterbasdb.py
+../sqlalchemy/dialects/mssql/__init__.py
+../sqlalchemy/dialects/mssql/adodbapi.py
+../sqlalchemy/dialects/mssql/base.py
+../sqlalchemy/dialects/mssql/information_schema.py
+../sqlalchemy/dialects/mssql/mxodbc.py
+../sqlalchemy/dialects/mssql/pymssql.py
+../sqlalchemy/dialects/mssql/pyodbc.py
+../sqlalchemy/dialects/mssql/zxjdbc.py
+../sqlalchemy/dialects/mysql/__init__.py
+../sqlalchemy/dialects/mysql/base.py
+../sqlalchemy/dialects/mysql/cymysql.py
+../sqlalchemy/dialects/mysql/enumerated.py
+../sqlalchemy/dialects/mysql/gaerdbms.py
+../sqlalchemy/dialects/mysql/json.py
+../sqlalchemy/dialects/mysql/mysqlconnector.py
+../sqlalchemy/dialects/mysql/mysqldb.py
+../sqlalchemy/dialects/mysql/oursql.py
+../sqlalchemy/dialects/mysql/pymysql.py
+../sqlalchemy/dialects/mysql/pyodbc.py
+../sqlalchemy/dialects/mysql/reflection.py
+../sqlalchemy/dialects/mysql/types.py
+../sqlalchemy/dialects/mysql/zxjdbc.py
+../sqlalchemy/dialects/oracle/__init__.py
+../sqlalchemy/dialects/oracle/base.py
+../sqlalchemy/dialects/oracle/cx_oracle.py
+../sqlalchemy/dialects/oracle/zxjdbc.py
+../sqlalchemy/dialects/postgresql/__init__.py
+../sqlalchemy/dialects/postgresql/array.py
+../sqlalchemy/dialects/postgresql/base.py
+../sqlalchemy/dialects/postgresql/dml.py
+../sqlalchemy/dialects/postgresql/ext.py
+../sqlalchemy/dialects/postgresql/hstore.py
+../sqlalchemy/dialects/postgresql/json.py
+../sqlalchemy/dialects/postgresql/pg8000.py
+../sqlalchemy/dialects/postgresql/psycopg2.py
+../sqlalchemy/dialects/postgresql/psycopg2cffi.py
+../sqlalchemy/dialects/postgresql/pygresql.py
+../sqlalchemy/dialects/postgresql/pypostgresql.py
+../sqlalchemy/dialects/postgresql/ranges.py
+../sqlalchemy/dialects/postgresql/zxjdbc.py
+../sqlalchemy/dialects/sqlite/__init__.py
+../sqlalchemy/dialects/sqlite/base.py
+../sqlalchemy/dialects/sqlite/pysqlcipher.py
+../sqlalchemy/dialects/sqlite/pysqlite.py
+../sqlalchemy/dialects/sybase/__init__.py
+../sqlalchemy/dialects/sybase/base.py
+../sqlalchemy/dialects/sybase/mxodbc.py
+../sqlalchemy/dialects/sybase/pyodbc.py
+../sqlalchemy/dialects/sybase/pysybase.py
+../sqlalchemy/ext/declarative/__init__.py
+../sqlalchemy/ext/declarative/api.py
+../sqlalchemy/ext/declarative/base.py
+../sqlalchemy/ext/declarative/clsregistry.py
+../sqlalchemy/testing/plugin/__init__.py
+../sqlalchemy/testing/plugin/bootstrap.py
+../sqlalchemy/testing/plugin/noseplugin.py
+../sqlalchemy/testing/plugin/plugin_base.py
+../sqlalchemy/testing/plugin/pytestplugin.py
+../sqlalchemy/testing/suite/__init__.py
+../sqlalchemy/testing/suite/test_ddl.py
+../sqlalchemy/testing/suite/test_dialect.py
+../sqlalchemy/testing/suite/test_insert.py
+../sqlalchemy/testing/suite/test_reflection.py
+../sqlalchemy/testing/suite/test_results.py
+../sqlalchemy/testing/suite/test_select.py
+../sqlalchemy/testing/suite/test_sequence.py
+../sqlalchemy/testing/suite/test_types.py
+../sqlalchemy/testing/suite/test_update_delete.py
+../sqlalchemy/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/__pycache__/events.cpython-34.pyc
+../sqlalchemy/__pycache__/exc.cpython-34.pyc
+../sqlalchemy/__pycache__/inspection.cpython-34.pyc
+../sqlalchemy/__pycache__/interfaces.cpython-34.pyc
+../sqlalchemy/__pycache__/log.cpython-34.pyc
+../sqlalchemy/__pycache__/pool.cpython-34.pyc
+../sqlalchemy/__pycache__/processors.cpython-34.pyc
+../sqlalchemy/__pycache__/schema.cpython-34.pyc
+../sqlalchemy/__pycache__/types.cpython-34.pyc
+../sqlalchemy/connectors/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/connectors/__pycache__/mxodbc.cpython-34.pyc
+../sqlalchemy/connectors/__pycache__/pyodbc.cpython-34.pyc
+../sqlalchemy/connectors/__pycache__/zxJDBC.cpython-34.pyc
+../sqlalchemy/databases/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/base.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/default.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/interfaces.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/reflection.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/result.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/strategies.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/threadlocal.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/url.cpython-34.pyc
+../sqlalchemy/engine/__pycache__/util.cpython-34.pyc
+../sqlalchemy/event/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/event/__pycache__/api.cpython-34.pyc
+../sqlalchemy/event/__pycache__/attr.cpython-34.pyc
+../sqlalchemy/event/__pycache__/base.cpython-34.pyc
+../sqlalchemy/event/__pycache__/legacy.cpython-34.pyc
+../sqlalchemy/event/__pycache__/registry.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/associationproxy.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/automap.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/baked.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/compiler.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/horizontal_shard.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/hybrid.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/indexable.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/instrumentation.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/mutable.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/orderinglist.cpython-34.pyc
+../sqlalchemy/ext/__pycache__/serializer.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/attributes.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/base.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/collections.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/dependency.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/deprecated_interfaces.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/descriptor_props.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/dynamic.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/evaluator.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/events.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/exc.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/identity.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/instrumentation.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/interfaces.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/loading.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/mapper.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/path_registry.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/persistence.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/properties.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/query.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/relationships.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/scoping.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/session.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/state.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/strategies.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/strategy_options.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/sync.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/unitofwork.cpython-34.pyc
+../sqlalchemy/orm/__pycache__/util.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/annotation.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/base.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/compiler.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/crud.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/ddl.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/default_comparator.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/dml.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/elements.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/expression.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/functions.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/naming.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/operators.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/schema.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/selectable.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/sqltypes.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/type_api.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/util.cpython-34.pyc
+../sqlalchemy/sql/__pycache__/visitors.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/assertions.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/assertsql.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/config.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/engines.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/entities.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/exclusions.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/fixtures.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/mock.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/pickleable.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/profiling.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/provision.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/replay_fixture.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/requirements.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/runner.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/schema.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/util.cpython-34.pyc
+../sqlalchemy/testing/__pycache__/warnings.cpython-34.pyc
+../sqlalchemy/util/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/util/__pycache__/_collections.cpython-34.pyc
+../sqlalchemy/util/__pycache__/compat.cpython-34.pyc
+../sqlalchemy/util/__pycache__/deprecations.cpython-34.pyc
+../sqlalchemy/util/__pycache__/langhelpers.cpython-34.pyc
+../sqlalchemy/util/__pycache__/queue.cpython-34.pyc
+../sqlalchemy/util/__pycache__/topological.cpython-34.pyc
+../sqlalchemy/dialects/firebird/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/firebird/__pycache__/base.cpython-34.pyc
+../sqlalchemy/dialects/firebird/__pycache__/fdb.cpython-34.pyc
+../sqlalchemy/dialects/firebird/__pycache__/kinterbasdb.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/adodbapi.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/base.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/information_schema.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/mxodbc.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/pymssql.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/pyodbc.cpython-34.pyc
+../sqlalchemy/dialects/mssql/__pycache__/zxjdbc.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/base.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/cymysql.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/enumerated.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/gaerdbms.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/json.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/mysqlconnector.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/mysqldb.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/oursql.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/pymysql.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/pyodbc.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/reflection.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/types.cpython-34.pyc
+../sqlalchemy/dialects/mysql/__pycache__/zxjdbc.cpython-34.pyc
+../sqlalchemy/dialects/oracle/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/oracle/__pycache__/base.cpython-34.pyc
+../sqlalchemy/dialects/oracle/__pycache__/cx_oracle.cpython-34.pyc
+../sqlalchemy/dialects/oracle/__pycache__/zxjdbc.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/array.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/base.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/dml.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/ext.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/hstore.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/json.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/pg8000.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/psycopg2.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/psycopg2cffi.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/pygresql.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/pypostgresql.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/ranges.cpython-34.pyc
+../sqlalchemy/dialects/postgresql/__pycache__/zxjdbc.cpython-34.pyc
+../sqlalchemy/dialects/sqlite/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/sqlite/__pycache__/base.cpython-34.pyc
+../sqlalchemy/dialects/sqlite/__pycache__/pysqlcipher.cpython-34.pyc
+../sqlalchemy/dialects/sqlite/__pycache__/pysqlite.cpython-34.pyc
+../sqlalchemy/dialects/sybase/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/dialects/sybase/__pycache__/base.cpython-34.pyc
+../sqlalchemy/dialects/sybase/__pycache__/mxodbc.cpython-34.pyc
+../sqlalchemy/dialects/sybase/__pycache__/pyodbc.cpython-34.pyc
+../sqlalchemy/dialects/sybase/__pycache__/pysybase.cpython-34.pyc
+../sqlalchemy/ext/declarative/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/ext/declarative/__pycache__/api.cpython-34.pyc
+../sqlalchemy/ext/declarative/__pycache__/base.cpython-34.pyc
+../sqlalchemy/ext/declarative/__pycache__/clsregistry.cpython-34.pyc
+../sqlalchemy/testing/plugin/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/testing/plugin/__pycache__/bootstrap.cpython-34.pyc
+../sqlalchemy/testing/plugin/__pycache__/noseplugin.cpython-34.pyc
+../sqlalchemy/testing/plugin/__pycache__/plugin_base.cpython-34.pyc
+../sqlalchemy/testing/plugin/__pycache__/pytestplugin.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/__init__.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_ddl.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_dialect.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_insert.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_reflection.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_results.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_select.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_sequence.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_types.cpython-34.pyc
+../sqlalchemy/testing/suite/__pycache__/test_update_delete.cpython-34.pyc
+../sqlalchemy/cprocessors.so
+../sqlalchemy/cresultproxy.so
+../sqlalchemy/cutils.so
+./
+dependency_links.txt
+PKG-INFO
+requires.txt
+SOURCES.txt
+top_level.txt
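A note on the three ``.so`` entries in the listing above: SQLAlchemy ships optional C speedups (``cprocessors``, ``cresultproxy``, ``cutils``) and falls back to pure-Python code paths when they cannot be imported. A quick, hedged way to check which variant a given deployment is actually running (module name taken from the listing above)::

    try:
        from sqlalchemy import cprocessors  # compiled result-row speedups
        print('SQLAlchemy C extensions are active')
    except ImportError:
        print('running on the pure-Python fallbacks')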
diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/requires.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/requires.txt
new file mode 100644
index 0000000..f2a83ed
--- /dev/null
+++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/requires.txt
@@ -0,0 +1,24 @@
+
+[mssql_pymssql]
+pymssql
+
+[mssql_pyodbc]
+pyodbc
+
+[mysql]
+mysqlclient
+
+[oracle]
+cx_oracle
+
+[postgresql]
+psycopg2
+
+[postgresql_pg8000]
+pg8000
+
+[postgresql_psycopg2cffi]
+psycopg2cffi
+
+[pymysql]
+pymysql
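The sections in this ``requires.txt`` are setuptools extras: installing, say, ``SQLAlchemy[postgresql]`` pulls in ``psycopg2``. At runtime the driver is selected by the database URL rather than by the extra itself; a minimal sketch, where the connection-string credentials and database name are placeholders::

    from sqlalchemy import create_engine

    # 'postgresql+psycopg2' names the dialect plus the DBAPI driver that
    # the [postgresql] extra above installs.
    engine = create_engine('postgresql+psycopg2://user:secret@localhost/mydb')
    with engine.connect() as conn:
        print(conn.execute('SELECT 1').scalar())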
diff --git a/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/top_level.txt b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/top_level.txt
new file mode 100644
index 0000000..39fb2be
--- /dev/null
+++ b/app/lib/SQLAlchemy-1.1.9-py3.4.egg-info/top_level.txt
@@ -0,0 +1 @@
+sqlalchemy
diff --git a/app/lib/Werkzeug-0.12.1.dist-info/DESCRIPTION.rst b/app/lib/Werkzeug-0.12.1.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..3e3abb7
--- /dev/null
+++ b/app/lib/Werkzeug-0.12.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,54 @@
+Werkzeug
+========
+
+Werkzeug started as a simple collection of various utilities for WSGI
+applications and has become one of the most advanced WSGI utility
+modules. It includes a powerful debugger, full featured request and
+response objects, HTTP utilities to handle entity tags, cache control
+headers, HTTP dates, cookie handling, file uploads, a powerful URL
+routing system and a bunch of community contributed addon modules.
+
+Werkzeug is unicode aware and doesn't enforce a specific template
+engine, database adapter or anything else. It doesn't even enforce
+a specific way of handling requests and leaves all that up to the
+developer. It's most useful for end user applications which should work
+on as many server environments as possible (such as blogs, wikis,
+bulletin boards, etc.).
+
+Details and example applications are available on the
+`Werkzeug website <http://werkzeug.pocoo.org/>`_.
+
+
+Features
+--------
+
+- unicode awareness
+
+- request and response objects
+
+- various utility functions for dealing with HTTP headers such as
+ `Accept` and `Cache-Control` headers.
+
+- thread local objects with proper cleanup at request end
+
+- an interactive debugger
+
+- a simple WSGI server with support for threading and forking
+ with an automatic reloader.
+
+- a flexible URL routing system with REST support.
+
+- fully WSGI compatible
+
+
+Development Version
+-------------------
+
+The Werkzeug development version can be installed by cloning the git
+repository from `github`_::
+
+ git clone git@github.com:pallets/werkzeug.git
+
+.. _github: http://github.com/pallets/werkzeug
+
+
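To make the feature list above concrete, here is a minimal sketch of a Werkzeug application using only pieces the list names (the request/response objects and the bundled development server); the host and port are arbitrary::

    from werkzeug.wrappers import Request, Response
    from werkzeug.serving import run_simple

    @Request.application
    def application(request):
        # The Request wrapper parses the WSGI environ; args exposes the
        # query string as a MultiDict.
        name = request.args.get('name', 'World')
        return Response('Hello %s!' % name)

    if __name__ == '__main__':
        # use_reloader enables the automatic reloader mentioned above.
        run_simple('localhost', 4000, application, use_reloader=True)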
diff --git a/app/lib/Werkzeug-0.12.1.dist-info/LICENSE.txt b/app/lib/Werkzeug-0.12.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000..1c2e0b7
--- /dev/null
+++ b/app/lib/Werkzeug-0.12.1.dist-info/LICENSE.txt
@@ -0,0 +1,29 @@
+Copyright (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/app/lib/Werkzeug-0.12.1.dist-info/METADATA b/app/lib/Werkzeug-0.12.1.dist-info/METADATA
new file mode 100644
index 0000000..86c5bdd
--- /dev/null
+++ b/app/lib/Werkzeug-0.12.1.dist-info/METADATA
@@ -0,0 +1,83 @@
+Metadata-Version: 2.0
+Name: Werkzeug
+Version: 0.12.1
+Summary: The Swiss Army knife of Python web development
+Home-page: http://werkzeug.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Provides-Extra: termcolor
+Requires-Dist: termcolor; extra == 'termcolor'
+Provides-Extra: watchdog
+Requires-Dist: watchdog; extra == 'watchdog'
+
+Werkzeug
+========
+
+Werkzeug started as a simple collection of various utilities for WSGI
+applications and has become one of the most advanced WSGI utility
+modules. It includes a powerful debugger, full featured request and
+response objects, HTTP utilities to handle entity tags, cache control
+headers, HTTP dates, cookie handling, file uploads, a powerful URL
+routing system and a bunch of community contributed addon modules.
+
+Werkzeug is unicode aware and doesn't enforce a specific template
+engine, database adapter or anything else. It doesn't even enforce
+a specific way of handling requests and leaves all that up to the
+developer. It's most useful for end user applications which should work
+on as many server environments as possible (such as blogs, wikis,
+bulletin boards, etc.).
+
+Details and example applications are available on the
+`Werkzeug website <http://werkzeug.pocoo.org/>`_.
+
+
+Features
+--------
+
+- unicode awareness
+
+- request and response objects
+
+- various utility functions for dealing with HTTP headers such as
+ `Accept` and `Cache-Control` headers.
+
+- thread local objects with proper cleanup at request end
+
+- an interactive debugger
+
+- a simple WSGI server with support for threading and forking
+ with an automatic reloader.
+
+- a flexible URL routing system with REST support.
+
+- fully WSGI compatible
+
+
+Development Version
+-------------------
+
+The Werkzeug development version can be installed by cloning the git
+repository from `github`_::
+
+ git clone git@github.com:pallets/werkzeug.git
+
+.. _github: http://github.com/pallets/werkzeug
+
+
diff --git a/app/lib/Werkzeug-0.12.1.dist-info/RECORD b/app/lib/Werkzeug-0.12.1.dist-info/RECORD
new file mode 100644
index 0000000..d6c4b77
--- /dev/null
+++ b/app/lib/Werkzeug-0.12.1.dist-info/RECORD
@@ -0,0 +1,94 @@
+werkzeug/posixemulation.py,sha256=xEF2Bxc-vUCPkiu4IbfWVd3LW7DROYAT-ExW6THqyzw,3519
+werkzeug/security.py,sha256=rbd9Q-Xga1NldPzuiVW5czvBGUXkhWARm_lxrT1Tx54,8990
+werkzeug/__init__.py,sha256=xQyZHWJtl_P88xARoyNi4QXv22yiApBfTRHsob__IFs,6864
+werkzeug/testapp.py,sha256=3HQRW1sHZKXuAjCvFMet4KXtQG3loYTFnvn6LWt-4zI,9396
+werkzeug/http.py,sha256=nrk-ASJzcKOuoBEz274TWA8jKt0CQSOBZuP_A0UASTA,36658
+werkzeug/routing.py,sha256=g25wg0GNfff8WcfRlc1ZxTGvz1KbVj09w2S7wxopseQ,66746
+werkzeug/utils.py,sha256=lkybtv_mq35zV1qhelvEcILTzrMUwZ9yon6E8XwapJE,22972
+werkzeug/exceptions.py,sha256=3wp95Hqj9FqV8MdikV99JRcHse_fSMn27V8tgP5Hw2c,20505
+werkzeug/_reloader.py,sha256=NkIXQCTa6b22wWLpXob_jIVUxux8LtAsfWehLkKt0iM,8816
+werkzeug/formparser.py,sha256=DxN53eOCb6i7PxqtldrF2Kv9Mx00BqW297N4t-RxkWE,21241
+werkzeug/_compat.py,sha256=8c4U9o6A_TR9nKCcTbpZNxpqCXcXDVIbFawwKM2s92c,6311
+werkzeug/datastructures.py,sha256=uajnldfkuVXEcE92qW-uo9YUAprO3Yh5dPDQfnUL9eE,89075
+werkzeug/wrappers.py,sha256=wceh1RhvhIZVzKuok3XMQ5jqjYYCEYv5JqKY3Nc_oRY,82986
+werkzeug/test.py,sha256=xnabNSpty66ftZiXHcoZaYFP1E4WUNxydw5Oe8Mjhoo,34795
+werkzeug/urls.py,sha256=fSbI4Gb29_p02Zk21VAZQRN1QdOVY9CNTgpb2rbajNQ,36710
+werkzeug/script.py,sha256=Jh9OAktqjLNc_IBBUatVM7uP5LDcbxaYA8n2ObnS4bo,11666
+werkzeug/useragents.py,sha256=Ck3G977Y0Rzdk9wFcLpL0PyOrONtdK1_d2Zexb78cX4,5640
+werkzeug/local.py,sha256=QdQhWV5L8p1Y1CJ1CDStwxaUs24SuN5aebHwjVD08C8,14553
+werkzeug/serving.py,sha256=bN5nO4zTqzs-QX4rBxbW1LVF5af0c_vCokhZHO4muUE,28961
+werkzeug/filesystem.py,sha256=hHWeWo_gqLMzTRfYt8-7n2wWcWUNTnDyudQDLOBEICE,2175
+werkzeug/wsgi.py,sha256=TjPo5ups3NI1RVVGdMvd3XaceqFtqlMX5X169gWWFrQ,42838
+werkzeug/_internal.py,sha256=sE2JbLnMzN9mRI1iipTYWrFAGEWaZVECqtHAiNEhqUE,13841
+werkzeug/contrib/fixers.py,sha256=gR06T-w71ur-tHQ_31kP_4jpOncPJ4Wc1dOqTvYusr8,10179
+werkzeug/contrib/limiter.py,sha256=iS8-ahPZ-JLRnmfIBzxpm7O_s3lPsiDMVWv7llAIDCI,1334
+werkzeug/contrib/__init__.py,sha256=f7PfttZhbrImqpr5Ezre8CXgwvcGUJK7zWNpO34WWrw,623
+werkzeug/contrib/testtools.py,sha256=G9xN-qeihJlhExrIZMCahvQOIDxdL9NiX874jiiHFMs,2453
+werkzeug/contrib/iterio.py,sha256=RlqDvGhz0RneTpzE8dVc-yWCUv4nkPl1jEc_EDp2fH0,10814
+werkzeug/contrib/cache.py,sha256=nyUUxsS0MTHiFmu-481y9PHd8NvWH5pzCoEX1yA0mHY,30341
+werkzeug/contrib/securecookie.py,sha256=bDsAJmslkwmXjycnPjEjWtfLBvhz0ud4z3k7tdezUVs,12174
+werkzeug/contrib/lint.py,sha256=qZlmqiWJ5tQJOEzLnPmHWA8eUEpcBIWkAb_V2RKJg4o,12558
+werkzeug/contrib/profiler.py,sha256=ISwCWvwVyGpDLRBRpLjo_qUWma6GXYBrTAco4PEQSHY,5151
+werkzeug/contrib/wrappers.py,sha256=v7OYlz7wQtDlS9fey75UiRZ1IkUWqCpzbhsLy4k14Hw,10398
+werkzeug/contrib/atom.py,sha256=qqfJcfIn2RYY-3hO3Oz0aLq9YuNubcPQ_KZcNsDwVJo,15575
+werkzeug/contrib/jsrouting.py,sha256=QTmgeDoKXvNK02KzXgx9lr3cAH6fAzpwF5bBdPNvJPs,8564
+werkzeug/contrib/sessions.py,sha256=39LVNvLbm5JWpbxM79WC2l87MJFbqeISARjwYbkJatw,12577
+werkzeug/debug/console.py,sha256=n3-dsKk1TsjnN-u4ZgmuWCU_HO0qw5IA7ttjhyyMM6I,5607
+werkzeug/debug/repr.py,sha256=bKqstDYGfECpeLerd48s_hxuqK4b6UWnjMu3d_DHO8I,9340
+werkzeug/debug/__init__.py,sha256=GTsOsjE3PqUAlsUVm2Mgc_KWA2kjjSsUz0JsM7Qu41w,17266
+werkzeug/debug/tbtools.py,sha256=rBudXCmkVdAKIcdhxANxgf09g6kQjJWW9_5bjSpr4OY,18451
+werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191
+werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818
+werkzeug/debug/shared/debugger.js,sha256=PKPVYuyO4SX1hkqLOwCLvmIEO5154WatFYaXE-zIfKI,6264
+werkzeug/debug/shared/style.css,sha256=IEO0PC2pWmh2aEyGCaN--txuWsRCliuhlbEhPDFwh0A,6270
+werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507
+werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673
+werkzeug/debug/shared/jquery.js,sha256=7LkWEzqTdpEfELxcZZlS6wAx5Ff13zZ83lYO2_ujj7g,95957
+werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220
+werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200
+Werkzeug-0.12.1.dist-info/DESCRIPTION.rst,sha256=z9r9xqJ0fYSAn1Tz7KRBdFGDerL2y4pHWSW_72pUgTc,1591
+Werkzeug-0.12.1.dist-info/metadata.json,sha256=gzIVhjk_QMpO2SldxIcRtgkXI3U-x92yMDpyuMy8-WM,1276
+Werkzeug-0.12.1.dist-info/RECORD,,
+Werkzeug-0.12.1.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9
+Werkzeug-0.12.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
+Werkzeug-0.12.1.dist-info/LICENSE.txt,sha256=F84h8-PZAuC-Hq-_252D3yhH6mqIc-WUbXUPbfOtjXM,1532
+Werkzeug-0.12.1.dist-info/METADATA,sha256=QqCGwUjZaX70SDkofJYsMxbG6UfcXLb6pkvSMGKN5DA,2738
+werkzeug/__pycache__/testapp.cpython-34.pyc,,
+werkzeug/debug/__pycache__/console.cpython-34.pyc,,
+werkzeug/__pycache__/formparser.cpython-34.pyc,,
+werkzeug/__pycache__/test.cpython-34.pyc,,
+werkzeug/__pycache__/wsgi.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/iterio.cpython-34.pyc,,
+werkzeug/__pycache__/urls.cpython-34.pyc,,
+werkzeug/debug/__pycache__/tbtools.cpython-34.pyc,,
+werkzeug/__pycache__/wrappers.cpython-34.pyc,,
+werkzeug/debug/__pycache__/__init__.cpython-34.pyc,,
+werkzeug/__pycache__/_reloader.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/securecookie.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/lint.cpython-34.pyc,,
+werkzeug/__pycache__/security.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/profiler.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/atom.cpython-34.pyc,,
+werkzeug/__pycache__/filesystem.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/fixers.cpython-34.pyc,,
+werkzeug/__pycache__/exceptions.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/sessions.cpython-34.pyc,,
+werkzeug/__pycache__/utils.cpython-34.pyc,,
+werkzeug/__pycache__/routing.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/wrappers.cpython-34.pyc,,
+werkzeug/__pycache__/posixemulation.cpython-34.pyc,,
+werkzeug/__pycache__/useragents.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/limiter.cpython-34.pyc,,
+werkzeug/__pycache__/_compat.cpython-34.pyc,,
+werkzeug/__pycache__/http.cpython-34.pyc,,
+werkzeug/__pycache__/local.cpython-34.pyc,,
+werkzeug/__pycache__/script.cpython-34.pyc,,
+werkzeug/__pycache__/datastructures.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/__init__.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/cache.cpython-34.pyc,,
+werkzeug/debug/__pycache__/repr.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/testtools.cpython-34.pyc,,
+werkzeug/__pycache__/serving.cpython-34.pyc,,
+werkzeug/__pycache__/__init__.cpython-34.pyc,,
+werkzeug/contrib/__pycache__/jsrouting.cpython-34.pyc,,
+werkzeug/__pycache__/_internal.cpython-34.pyc,,
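Each populated row above has the form ``path,sha256=<digest>,size``, where the digest is the urlsafe base64 of the file's SHA-256 with ``=`` padding stripped; the RECORD file itself and the ``.pyc`` entries carry no hash. A small sketch of how such an entry can be re-checked, with the path taken relative to the installation directory::

    import base64
    import hashlib

    def record_hash(path):
        # Hash the installed file the same way bdist_wheel does:
        # urlsafe base64 of the SHA-256 digest, '=' padding removed.
        with open(path, 'rb') as f:
            digest = hashlib.sha256(f.read()).digest()
        b64 = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
        return 'sha256=' + b64

    print(record_hash('werkzeug/security.py'))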
diff --git a/app/lib/Werkzeug-0.12.1.dist-info/WHEEL b/app/lib/Werkzeug-0.12.1.dist-info/WHEEL
new file mode 100644
index 0000000..9dff69d
--- /dev/null
+++ b/app/lib/Werkzeug-0.12.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
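WHEEL metadata uses the same ``Key: Value`` format as email headers, so the stdlib parser reads it directly; the two ``Tag`` lines mark this wheel as pure Python for both major versions on any platform. A sketch, using the filename from this diff::

    from email.parser import Parser

    with open('Werkzeug-0.12.1.dist-info/WHEEL') as f:
        wheel = Parser().parsestr(f.read())

    print(wheel.get_all('Tag'))      # ['py2-none-any', 'py3-none-any']
    print(wheel['Root-Is-Purelib'])  # 'true'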
diff --git a/app/lib/Werkzeug-0.12.1.dist-info/metadata.json b/app/lib/Werkzeug-0.12.1.dist-info/metadata.json
new file mode 100644
index 0000000..4fc2cf2
--- /dev/null
+++ b/app/lib/Werkzeug-0.12.1.dist-info/metadata.json
@@ -0,0 +1 @@
+{"license": "BSD", "name": "Werkzeug", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "The Swiss Army knife of Python web development", "platform": "any", "run_requires": [{"requires": ["watchdog"], "extra": "watchdog"}, {"requires": ["termcolor"], "extra": "termcolor"}], "version": "0.12.1", "extensions": {"python.details": {"project_urls": {"Home": "http://werkzeug.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules"], "extras": ["termcolor", "watchdog"]}
\ No newline at end of file
diff --git a/app/lib/Werkzeug-0.12.1.dist-info/top_level.txt b/app/lib/Werkzeug-0.12.1.dist-info/top_level.txt
new file mode 100644
index 0000000..6fe8da8
--- /dev/null
+++ b/app/lib/Werkzeug-0.12.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+werkzeug
diff --git a/app/lib/click-6.7.dist-info/DESCRIPTION.rst b/app/lib/click-6.7.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..e118723
--- /dev/null
+++ b/app/lib/click-6.7.dist-info/DESCRIPTION.rst
@@ -0,0 +1,3 @@
+UNKNOWN
+
+
diff --git a/app/lib/click-6.7.dist-info/METADATA b/app/lib/click-6.7.dist-info/METADATA
new file mode 100644
index 0000000..1f10885
--- /dev/null
+++ b/app/lib/click-6.7.dist-info/METADATA
@@ -0,0 +1,16 @@
+Metadata-Version: 2.0
+Name: click
+Version: 6.7
+Summary: A simple wrapper around optparse for powerful command line utilities.
+Home-page: http://github.com/mitsuhiko/click
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+
+UNKNOWN
+
+
diff --git a/app/lib/click-6.7.dist-info/RECORD b/app/lib/click-6.7.dist-info/RECORD
new file mode 100644
index 0000000..841d846
--- /dev/null
+++ b/app/lib/click-6.7.dist-info/RECORD
@@ -0,0 +1,40 @@
+click/__init__.py,sha256=k8R00cFKWI8dhDVKQeLBlAdNh1CxerMEDRiGnr32gdw,2858
+click/_bashcomplete.py,sha256=82rMiibtEurdwBq60NHXVCBuGXJHDpblFO9o2YxJDF0,2423
+click/_compat.py,sha256=j59MpzxYGE-fTGj0A5sg8UI8GhHod1XMojiCA0jvbL0,21011
+click/_termui_impl.py,sha256=Ol1JJhvBRw3l8j1WIU0tOWjQtxxmwGE44lFDbzDqzoA,16395
+click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198
+click/_unicodefun.py,sha256=A3UOzJw6lEZyol2SBg3fNXgweTutaOzkJ61OB7vik3Y,4204
+click/_winconsole.py,sha256=MzG46DEYPoRyx4SO7EIhFuFZHESgooAfJLIukbB6p5c,7790
+click/core.py,sha256=M0nJ6Kkye7XZXYG7HCbkJWSfy14WHV6bQmGLACrOhKw,70254
+click/decorators.py,sha256=y7CX2needh8iRWafj-QS_hGQFsN24eyXAhx5Y2ATwas,10941
+click/exceptions.py,sha256=rOa0pP3PbSy0_AAPOW9irBEM8AJ3BySN-4z2VUwFVo4,6788
+click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889
+click/globals.py,sha256=PAgnKvGxq4YuEIldw3lgYOGBLYwsyxnm1IByBX3BFXo,1515
+click/parser.py,sha256=i01xgYuIA6AwQWEXjshwHSwnTR3gUep4FxJIfyW4ta4,15510
+click/termui.py,sha256=Bp99MSWQtyoWe1_7HggDmA77n--3KLxu7NsZMFMaCUo,21008
+click/testing.py,sha256=kJ9mjtJgwNAlkgKcFf9-ISxufmaPDbbuOHVC9WIvKdY,11002
+click/types.py,sha256=ZGb2lmFs5Vwd9loTRIMbGcqhPVOql8mGoBhWBRT6V4E,18864
+click/utils.py,sha256=1jalPlkUU28JReTEQeeSFtbJd-SirYWBNfjtELBKzT4,14916
+click-6.7.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
+click-6.7.dist-info/METADATA,sha256=l6lAyogIUXiHKUK_rWguef-EMcvO5C6bXzFCNCcblbQ,424
+click-6.7.dist-info/RECORD,,
+click-6.7.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113
+click-6.7.dist-info/metadata.json,sha256=qg0uO6amNHkIkOxnmWX7Xa_DNQMQ62Q6drivuP9Gh1c,571
+click-6.7.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
+click/__pycache__/_bashcomplete.cpython-34.pyc,,
+click/__pycache__/types.cpython-34.pyc,,
+click/__pycache__/parser.cpython-34.pyc,,
+click/__pycache__/testing.cpython-34.pyc,,
+click/__pycache__/_textwrap.cpython-34.pyc,,
+click/__pycache__/core.cpython-34.pyc,,
+click/__pycache__/_winconsole.cpython-34.pyc,,
+click/__pycache__/decorators.cpython-34.pyc,,
+click/__pycache__/_compat.cpython-34.pyc,,
+click/__pycache__/termui.cpython-34.pyc,,
+click/__pycache__/_unicodefun.cpython-34.pyc,,
+click/__pycache__/__init__.cpython-34.pyc,,
+click/__pycache__/_termui_impl.cpython-34.pyc,,
+click/__pycache__/globals.cpython-34.pyc,,
+click/__pycache__/exceptions.cpython-34.pyc,,
+click/__pycache__/utils.cpython-34.pyc,,
+click/__pycache__/formatting.cpython-34.pyc,,
diff --git a/app/lib/click-6.7.dist-info/WHEEL b/app/lib/click-6.7.dist-info/WHEEL
new file mode 100644
index 0000000..7bf9daa
--- /dev/null
+++ b/app/lib/click-6.7.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0.a0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/app/lib/click-6.7.dist-info/metadata.json b/app/lib/click-6.7.dist-info/metadata.json
new file mode 100644
index 0000000..0a4cfb1
--- /dev/null
+++ b/app/lib/click-6.7.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3"], "extensions": {"python.details": {"contacts": [{"email": "armin.ronacher@active-4.com", "name": "Armin Ronacher", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/mitsuhiko/click"}}}, "generator": "bdist_wheel (0.30.0.a0)", "metadata_version": "2.0", "name": "click", "summary": "A simple wrapper around optparse for powerful command line utilities.", "version": "6.7"}
\ No newline at end of file
diff --git a/app/lib/click-6.7.dist-info/top_level.txt b/app/lib/click-6.7.dist-info/top_level.txt
new file mode 100644
index 0000000..dca9a90
--- /dev/null
+++ b/app/lib/click-6.7.dist-info/top_level.txt
@@ -0,0 +1 @@
+click
diff --git a/app/lib/click/__init__.py b/app/lib/click/__init__.py
new file mode 100644
index 0000000..971e55d
--- /dev/null
+++ b/app/lib/click/__init__.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+"""
+ click
+ ~~~~~
+
+ Click is a simple Python module that wraps the stdlib's optparse to make
+ writing command line scripts fun. Unlike other modules, it's based around
+ a simple API that does not come with too much magic and is composable.
+
+ In case optparse ever gets removed from the stdlib, it will be shipped by
+ this module.
+
+ :copyright: (c) 2014 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+# Core classes
+from .core import Context, BaseCommand, Command, MultiCommand, Group, \
+ CommandCollection, Parameter, Option, Argument
+
+# Globals
+from .globals import get_current_context
+
+# Decorators
+from .decorators import pass_context, pass_obj, make_pass_decorator, \
+ command, group, argument, option, confirmation_option, \
+ password_option, version_option, help_option
+
+# Types
+from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
+ STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
+
+# Utilities
+from .utils import echo, get_binary_stream, get_text_stream, open_file, \
+ format_filename, get_app_dir, get_os_args
+
+# Terminal functions
+from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
+ progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
+ pause
+
+# Exceptions
+from .exceptions import ClickException, UsageError, BadParameter, \
+ FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
+ MissingParameter
+
+# Formatting
+from .formatting import HelpFormatter, wrap_text
+
+# Parsing
+from .parser import OptionParser
+
+
+__all__ = [
+ # Core classes
+ 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
+ 'CommandCollection', 'Parameter', 'Option', 'Argument',
+
+ # Globals
+ 'get_current_context',
+
+ # Decorators
+ 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
+ 'argument', 'option', 'confirmation_option', 'password_option',
+ 'version_option', 'help_option',
+
+ # Types
+ 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
+ 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
+
+ # Utilities
+ 'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
+ 'format_filename', 'get_app_dir', 'get_os_args',
+
+ # Terminal functions
+ 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
+ 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
+ 'getchar', 'pause',
+
+ # Exceptions
+ 'ClickException', 'UsageError', 'BadParameter', 'FileError',
+ 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
+ 'MissingParameter',
+
+ # Formatting
+ 'HelpFormatter', 'wrap_text',
+
+ # Parsing
+ 'OptionParser',
+]
+
+
+# Controls if click should emit the warning about the use of unicode
+# literals.
+disable_unicode_literals_warning = False
+
+
+__version__ = '6.7'
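The docstring and ``__all__`` list above sketch the public surface; a minimal command built only from names re-exported there (``command``, ``option``, ``argument``, ``echo``) looks like the following — the ``hello`` command itself is invented for illustration::

    import click

    @click.command()
    @click.option('--count', default=1, help='Number of greetings.')
    @click.argument('name')
    def hello(count, name):
        """Greet NAME a number of times."""
        for _ in range(count):
            click.echo('Hello %s!' % name)

    if __name__ == '__main__':
        hello()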
diff --git a/app/lib/click/_bashcomplete.py b/app/lib/click/_bashcomplete.py
new file mode 100644
index 0000000..d9d26d2
--- /dev/null
+++ b/app/lib/click/_bashcomplete.py
@@ -0,0 +1,83 @@
+import os
+import re
+from .utils import echo
+from .parser import split_arg_string
+from .core import MultiCommand, Option
+
+
+COMPLETION_SCRIPT = '''
+%(complete_func)s() {
+ COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
+ COMP_CWORD=$COMP_CWORD \\
+ %(autocomplete_var)s=complete $1 ) )
+ return 0
+}
+
+complete -F %(complete_func)s -o default %(script_names)s
+'''
+
+_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')
+
+
+def get_completion_script(prog_name, complete_var):
+ cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))
+ return (COMPLETION_SCRIPT % {
+ 'complete_func': '_%s_completion' % cf_name,
+ 'script_names': prog_name,
+ 'autocomplete_var': complete_var,
+ }).strip() + ';'
+
+
+def resolve_ctx(cli, prog_name, args):
+ ctx = cli.make_context(prog_name, args, resilient_parsing=True)
+ while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand):
+ a = ctx.protected_args + ctx.args
+ cmd = ctx.command.get_command(ctx, a[0])
+ if cmd is None:
+ return None
+ ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True)
+ return ctx
+
+
+def get_choices(cli, prog_name, args, incomplete):
+ ctx = resolve_ctx(cli, prog_name, args)
+ if ctx is None:
+ return
+
+ choices = []
+ if incomplete and not incomplete[:1].isalnum():
+ for param in ctx.command.params:
+ if not isinstance(param, Option):
+ continue
+ choices.extend(param.opts)
+ choices.extend(param.secondary_opts)
+ elif isinstance(ctx.command, MultiCommand):
+ choices.extend(ctx.command.list_commands(ctx))
+
+ for item in choices:
+ if item.startswith(incomplete):
+ yield item
+
+
+def do_complete(cli, prog_name):
+ cwords = split_arg_string(os.environ['COMP_WORDS'])
+ cword = int(os.environ['COMP_CWORD'])
+ args = cwords[1:cword]
+ try:
+ incomplete = cwords[cword]
+ except IndexError:
+ incomplete = ''
+
+ for item in get_choices(cli, prog_name, args, incomplete):
+ echo(item)
+
+ return True
+
+
+def bashcomplete(cli, prog_name, complete_var, complete_instr):
+ if complete_instr == 'source':
+ echo(get_completion_script(prog_name, complete_var))
+ return True
+ elif complete_instr == 'complete':
+ return do_complete(cli, prog_name)
+ return False
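This module implements a small protocol: the emitted bash function re-invokes the program with ``COMP_WORDS``/``COMP_CWORD`` in the environment and the magic variable set to ``complete``, and ``get_choices`` then walks the command tree to yield candidates. It can also be exercised directly in-process; the toy CLI below is invented for illustration::

    import click
    from click._bashcomplete import get_choices

    @click.group()
    def cli():
        pass

    @cli.command()
    @click.option('--count')
    def hello(count):
        pass

    # Completing '--c' after the 'hello' subcommand yields the matching
    # option names.
    print(list(get_choices(cli, 'cli', ['hello'], '--c')))  # ['--count']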
diff --git a/app/lib/click/_compat.py b/app/lib/click/_compat.py
new file mode 100644
index 0000000..2b43412
--- /dev/null
+++ b/app/lib/click/_compat.py
@@ -0,0 +1,648 @@
+import re
+import io
+import os
+import sys
+import codecs
+from weakref import WeakKeyDictionary
+
+
+PY2 = sys.version_info[0] == 2
+WIN = sys.platform.startswith('win')
+DEFAULT_COLUMNS = 80
+
+
+_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
+
+
+def get_filesystem_encoding():
+ return sys.getfilesystemencoding() or sys.getdefaultencoding()
+
+
+def _make_text_stream(stream, encoding, errors):
+ if encoding is None:
+ encoding = get_best_encoding(stream)
+ if errors is None:
+ errors = 'replace'
+ return _NonClosingTextIOWrapper(stream, encoding, errors,
+ line_buffering=True)
+
+
+def is_ascii_encoding(encoding):
+ """Checks if a given encoding is ascii."""
+ try:
+ return codecs.lookup(encoding).name == 'ascii'
+ except LookupError:
+ return False
+
+
+def get_best_encoding(stream):
+ """Returns the default stream encoding if not found."""
+ rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
+ if is_ascii_encoding(rv):
+ return 'utf-8'
+ return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+
+ def __init__(self, stream, encoding, errors, **extra):
+ self._stream = stream = _FixupStream(stream)
+ io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
+
+ # The io module is a place where the Python 3 text behavior
+ # was forced upon Python 2, so we need to unbreak
+ # it to look like Python 2.
+ if PY2:
+ def write(self, x):
+ if isinstance(x, str) or is_bytes(x):
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(str(x))
+ return io.TextIOWrapper.write(self, x)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __del__(self):
+ try:
+ self.detach()
+ except Exception:
+ pass
+
+ def isatty(self):
+ # https://bitbucket.org/pypy/pypy/issue/1803
+ return self._stream.isatty()
+
+
+class _FixupStream(object):
+ """The new io interface needs more from streams than streams
+ traditionally implement. As such, this fix-up code is necessary in
+ some circumstances.
+ """
+
+ def __init__(self, stream):
+ self._stream = stream
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+ def read1(self, size):
+ f = getattr(self._stream, 'read1', None)
+ if f is not None:
+ return f(size)
+ # We only dispatch to readline instead of read in Python 2 as we
+ # do not want to cause problems with the different implementation
+ # of line buffering.
+ if PY2:
+ return self._stream.readline(size)
+ return self._stream.read(size)
+
+ def readable(self):
+ x = getattr(self._stream, 'readable', None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.read(0)
+ except Exception:
+ return False
+ return True
+
+ def writable(self):
+ x = getattr(self._stream, 'writable', None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.write('')
+ except Exception:
+ try:
+ self._stream.write(b'')
+ except Exception:
+ return False
+ return True
+
+ def seekable(self):
+ x = getattr(self._stream, 'seekable', None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.seek(self._stream.tell())
+ except Exception:
+ return False
+ return True
+
+
+if PY2:
+ text_type = unicode
+ bytes = str
+ raw_input = raw_input
+ string_types = (str, unicode)
+ iteritems = lambda x: x.iteritems()
+ range_type = xrange
+
+ def is_bytes(x):
+ return isinstance(x, (buffer, bytearray))
+
+ _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
+
+ # For Windows, we need to force stdout/stdin/stderr to binary if it's
+ # fetched for that. This obviously is not the most correct way to do
+ # it as it changes global state. Unfortunately, there does not seem to
+ # be a clear better way to do it as just reopening the file in binary
+ # mode does not change anything.
+ #
+ # An option would be to do what Python 3 does and to open the file as
+ # binary only, patch it back to the system, and then use a wrapper
+ # stream that converts newlines. It's not quite clear what the
+ # correct option is here.
+ #
+ # This code also lives in _winconsole for the fallback to the console
+ # emulation stream.
+ #
+ # There are also Windows environments where the `msvcrt` module is not
+ # available (which is why we use try/except instead of the WIN variable
+ # here), such as the Google App Engine development server on Windows. In
+ # those cases there is just nothing we can do.
+ try:
+ import msvcrt
+ except ImportError:
+ set_binary_mode = lambda x: x
+ else:
+ def set_binary_mode(f):
+ try:
+ fileno = f.fileno()
+ except Exception:
+ pass
+ else:
+ msvcrt.setmode(fileno, os.O_BINARY)
+ return f
+
+ def isidentifier(x):
+ return _identifier_re.search(x) is not None
+
+ def get_binary_stdin():
+ return set_binary_mode(sys.stdin)
+
+ def get_binary_stdout():
+ return set_binary_mode(sys.stdout)
+
+ def get_binary_stderr():
+ return set_binary_mode(sys.stderr)
+
+ def get_text_stdin(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stdin, encoding, errors)
+
+ def get_text_stdout(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stdout, encoding, errors)
+
+ def get_text_stderr(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stderr, encoding, errors)
+
+ def filename_to_ui(value):
+ if isinstance(value, bytes):
+ value = value.decode(get_filesystem_encoding(), 'replace')
+ return value
+else:
+ import io
+ text_type = str
+ raw_input = input
+ string_types = (str,)
+ range_type = range
+ isidentifier = lambda x: x.isidentifier()
+ iteritems = lambda x: iter(x.items())
+
+ def is_bytes(x):
+ return isinstance(x, (bytes, memoryview, bytearray))
+
+ def _is_binary_reader(stream, default=False):
+ try:
+ return isinstance(stream.read(0), bytes)
+ except Exception:
+ # This happens in some cases where the stream was already
+ # closed. In this case, we assume the default.
+ return default
+
+ def _is_binary_writer(stream, default=False):
+ try:
+ stream.write(b'')
+ except Exception:
+ try:
+ stream.write('')
+ return False
+ except Exception:
+ pass
+ return default
+ return True
+
+ def _find_binary_reader(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_reader(stream, False):
+ return stream
+
+ buf = getattr(stream, 'buffer', None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_reader(buf, True):
+ return buf
+
+ def _find_binary_writer(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_writer(stream, False):
+ return stream
+
+ buf = getattr(stream, 'buffer', None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_writer(buf, True):
+ return buf
+
+ def _stream_is_misconfigured(stream):
+ """A stream is misconfigured if its encoding is ASCII."""
+ # If the stream does not have an encoding set, we assume it's set
+ # to ASCII. This appears to happen in certain unittest
+ # environments. It's not quite clear what the correct behavior is
+ # but this at least will force Click to recover somehow.
+ return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
+
+ def _is_compatible_text_stream(stream, encoding, errors):
+ stream_encoding = getattr(stream, 'encoding', None)
+ stream_errors = getattr(stream, 'errors', None)
+
+ # Perfect match.
+ if stream_encoding == encoding and stream_errors == errors:
+ return True
+
+ # Otherwise, it's only a compatible stream if we did not ask for
+ # an encoding.
+ if encoding is None:
+ return stream_encoding is not None
+
+ return False
+
+ def _force_correct_text_reader(text_reader, encoding, errors):
+ if _is_binary_reader(text_reader, False):
+ binary_reader = text_reader
+ else:
+ # If there is no target encoding set, we need to verify that the
+ # reader is not actually misconfigured.
+ if encoding is None and not _stream_is_misconfigured(text_reader):
+ return text_reader
+
+ if _is_compatible_text_stream(text_reader, encoding, errors):
+ return text_reader
+
+ # If the reader has no encoding, we try to find the underlying
+ # binary reader for it. If that fails because the environment is
+ # misconfigured, we silently go with the same reader because this
+ # is too common to happen. In that case, mojibake is better than
+ # exceptions.
+ binary_reader = _find_binary_reader(text_reader)
+ if binary_reader is None:
+ return text_reader
+
+ # At this point, we default the errors to replace instead of strict
+ # because nobody handles those errors anyways and at this point
+ # we're so fundamentally fucked that nothing can repair it.
+ if errors is None:
+ errors = 'replace'
+ return _make_text_stream(binary_reader, encoding, errors)
+
+ def _force_correct_text_writer(text_writer, encoding, errors):
+ if _is_binary_writer(text_writer, False):
+ binary_writer = text_writer
+ else:
+ # If there is no target encoding set, we need to verify that the
+ # writer is not actually misconfigured.
+ if encoding is None and not _stream_is_misconfigured(text_writer):
+ return text_writer
+
+ if _is_compatible_text_stream(text_writer, encoding, errors):
+ return text_writer
+
+ # If the writer has no encoding, we try to find the underlying
+ # binary writer for it. If that fails because the environment is
+ # misconfigured, we silently go with the same writer because this
+ # is too common to happen. In that case, mojibake is better than
+ # exceptions.
+ binary_writer = _find_binary_writer(text_writer)
+ if binary_writer is None:
+ return text_writer
+
+ # At this point, we default the errors to replace instead of strict
+ # because nobody handles those errors anyways and at this point
+ # we're so fundamentally fucked that nothing can repair it.
+ if errors is None:
+ errors = 'replace'
+ return _make_text_stream(binary_writer, encoding, errors)
+
+ def get_binary_stdin():
+ reader = _find_binary_reader(sys.stdin)
+ if reader is None:
+ raise RuntimeError('Was not able to determine binary '
+ 'stream for sys.stdin.')
+ return reader
+
+ def get_binary_stdout():
+ writer = _find_binary_writer(sys.stdout)
+ if writer is None:
+ raise RuntimeError('Was not able to determine binary '
+ 'stream for sys.stdout.')
+ return writer
+
+ def get_binary_stderr():
+ writer = _find_binary_writer(sys.stderr)
+ if writer is None:
+ raise RuntimeError('Was not able to determine binary '
+ 'stream for sys.stderr.')
+ return writer
+
+ def get_text_stdin(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_reader(sys.stdin, encoding, errors)
+
+ def get_text_stdout(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stdout, encoding, errors)
+
+ def get_text_stderr(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stderr, encoding, errors)
+
+ def filename_to_ui(value):
+ if isinstance(value, bytes):
+ value = value.decode(get_filesystem_encoding(), 'replace')
+ else:
+ value = value.encode('utf-8', 'surrogateescape') \
+ .decode('utf-8', 'replace')
+ return value
+
+
+def get_streerror(e, default=None):
+ if hasattr(e, 'strerror'):
+ msg = e.strerror
+ else:
+ if default is not None:
+ msg = default
+ else:
+ msg = str(e)
+ if isinstance(msg, bytes):
+ msg = msg.decode('utf-8', 'replace')
+ return msg
+
+
+def open_stream(filename, mode='r', encoding=None, errors='strict',
+ atomic=False):
+ # Standard streams first. These are simple because they don't need
+ # special handling for the atomic flag. It's entirely ignored.
+ if filename == '-':
+ if 'w' in mode:
+ if 'b' in mode:
+ return get_binary_stdout(), False
+ return get_text_stdout(encoding=encoding, errors=errors), False
+ if 'b' in mode:
+ return get_binary_stdin(), False
+ return get_text_stdin(encoding=encoding, errors=errors), False
+
+ # Non-atomic writes directly go out through the regular open functions.
+ if not atomic:
+ if encoding is None:
+ return open(filename, mode), True
+ return io.open(filename, mode, encoding=encoding, errors=errors), True
+
+ # Some usability stuff for atomic writes
+ if 'a' in mode:
+ raise ValueError(
+ 'Appending to an existing file is not supported, because that '
+ 'would involve an expensive `copy`-operation to a temporary '
+ 'file. Open the file in normal `w`-mode and copy explicitly '
+ 'if that\'s what you\'re after.'
+ )
+ if 'x' in mode:
+ raise ValueError('Use the `overwrite`-parameter instead.')
+ if 'w' not in mode:
+ raise ValueError('Atomic writes only make sense with `w`-mode.')
+
+ # Atomic writes are more complicated. They work by opening a file
+ # as a proxy in the same folder and then using the fdopen
+ # functionality to wrap it in a Python file. Then we wrap it in an
+ # atomic file that moves the file over on close.
+ import tempfile
+ fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
+ prefix='.__atomic-write')
+
+ if encoding is not None:
+ f = io.open(fd, mode, encoding=encoding, errors=errors)
+ else:
+ f = os.fdopen(fd, mode)
+
+ return _AtomicFile(f, tmp_filename, filename), True
+
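+# A minimal usage sketch (illustrative, not part of the module): the
+# second return value reports whether the caller owns the stream and has
+# to close it; the standard streams ('-') come back unowned.
+#
+#     f, should_close = open_stream('report.txt', 'w', atomic=True)
+#     try:
+#         f.write('all done\n')
+#     finally:
+#         if should_close:
+#             f.close()  # moves the temporary file over 'report.txt'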
+
+# Used in a destructor call, needs extra protection from interpreter cleanup.
+if hasattr(os, 'replace'):
+ _replace = os.replace
+ _can_replace = True
+else:
+ _replace = os.rename
+ _can_replace = not WIN
+
+
+class _AtomicFile(object):
+
+ def __init__(self, f, tmp_filename, real_filename):
+ self._f = f
+ self._tmp_filename = tmp_filename
+ self._real_filename = real_filename
+ self.closed = False
+
+ @property
+ def name(self):
+ return self._real_filename
+
+ def close(self, delete=False):
+ if self.closed:
+ return
+ self._f.close()
+ if not _can_replace:
+ try:
+ os.remove(self._real_filename)
+ except OSError:
+ pass
+ _replace(self._tmp_filename, self._real_filename)
+ self.closed = True
+
+ def __getattr__(self, name):
+ return getattr(self._f, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close(delete=exc_type is not None)
+
+ def __repr__(self):
+ return repr(self._f)
+
+
+auto_wrap_for_ansi = None
+colorama = None
+get_winterm_size = None
+
+
+def strip_ansi(value):
+ return _ansi_re.sub('', value)
+
+
+def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ if stream is None:
+ stream = sys.stdin
+ return not isatty(stream)
+ return not color
+
+
+# If we're on Windows, we provide transparent integration through
+# colorama. This will make ANSI colors through the echo function
+# work automatically.
+if WIN:
+ # Windows has a smaller terminal
+ DEFAULT_COLUMNS = 79
+
+ from ._winconsole import _get_windows_console_stream
+
+ def _get_argv_encoding():
+ import locale
+ return locale.getpreferredencoding()
+
+ if PY2:
+ def raw_input(prompt=''):
+ sys.stderr.flush()
+ if prompt:
+ stdout = _default_text_stdout()
+ stdout.write(prompt)
+ stdin = _default_text_stdin()
+ return stdin.readline().rstrip('\r\n')
+
+ try:
+ import colorama
+ except ImportError:
+ pass
+ else:
+ _ansi_stream_wrappers = WeakKeyDictionary()
+
+ def auto_wrap_for_ansi(stream, color=None):
+ """This function wraps a stream so that calls through colorama
+ are issued to the win32 console API to recolor on demand. It
+ also ensures to reset the colors if a write call is interrupted
+ to not destroy the console afterwards.
+ """
+ try:
+ cached = _ansi_stream_wrappers.get(stream)
+ except Exception:
+ cached = None
+ if cached is not None:
+ return cached
+ strip = should_strip_ansi(stream, color)
+ ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
+ rv = ansi_wrapper.stream
+ _write = rv.write
+
+ def _safe_write(s):
+ try:
+ return _write(s)
+ except:
+ ansi_wrapper.reset_all()
+ raise
+
+ rv.write = _safe_write
+ try:
+ _ansi_stream_wrappers[stream] = rv
+ except Exception:
+ pass
+ return rv
+
+ def get_winterm_size():
+ win = colorama.win32.GetConsoleScreenBufferInfo(
+ colorama.win32.STDOUT).srWindow
+ return win.Right - win.Left, win.Bottom - win.Top
+else:
+ def _get_argv_encoding():
+ return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
+
+ _get_windows_console_stream = lambda *x: None
+
+
+def term_len(x):
+ return len(strip_ansi(x))
+
+
+def isatty(stream):
+ try:
+ return stream.isatty()
+ except Exception:
+ return False
+
+
+def _make_cached_stream_func(src_func, wrapper_func):
+ cache = WeakKeyDictionary()
+ def func():
+ stream = src_func()
+ try:
+ rv = cache.get(stream)
+ except Exception:
+ rv = None
+ if rv is not None:
+ return rv
+ rv = wrapper_func()
+ try:
+ cache[stream] = rv
+ except Exception:
+ pass
+ return rv
+ return func
+
+
+_default_text_stdin = _make_cached_stream_func(
+ lambda: sys.stdin, get_text_stdin)
+_default_text_stdout = _make_cached_stream_func(
+ lambda: sys.stdout, get_text_stdout)
+_default_text_stderr = _make_cached_stream_func(
+ lambda: sys.stderr, get_text_stderr)
+
+
+binary_streams = {
+ 'stdin': get_binary_stdin,
+ 'stdout': get_binary_stdout,
+ 'stderr': get_binary_stderr,
+}
+
+text_streams = {
+ 'stdin': get_text_stdin,
+ 'stdout': get_text_stdout,
+ 'stderr': get_text_stderr,
+}
diff --git a/app/lib/click/_termui_impl.py b/app/lib/click/_termui_impl.py
new file mode 100644
index 0000000..7cfd3d5
--- /dev/null
+++ b/app/lib/click/_termui_impl.py
@@ -0,0 +1,547 @@
+"""
+ click._termui_impl
+ ~~~~~~~~~~~~~~~~~~
+
+ This module contains implementations for the termui module. To keep the
+ import time of Click down, some infrequently used functionality is placed
+ in this module and only imported as needed.
+
+ :copyright: (c) 2014 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+import time
+import math
+from ._compat import _default_text_stdout, range_type, int_types, PY2, \
+     isatty, open_stream, strip_ansi, term_len, get_best_encoding, WIN
+from .utils import echo
+from .exceptions import ClickException
+
+
+if os.name == 'nt':
+ BEFORE_BAR = '\r'
+ AFTER_BAR = '\n'
+else:
+ BEFORE_BAR = '\r\033[?25l'
+ AFTER_BAR = '\033[?25h\n'
+
+
+def _length_hint(obj):
+ """Returns the length hint of an object."""
+ try:
+ return len(obj)
+ except (AttributeError, TypeError):
+ try:
+ get_hint = type(obj).__length_hint__
+ except AttributeError:
+ return None
+ try:
+ hint = get_hint(obj)
+ except TypeError:
+ return None
+        if hint is NotImplemented or \
+           not isinstance(hint, int_types) or \
+           hint < 0:
+ return None
+ return hint
+
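+# Illustrative sketch: an object can advertise an estimated length via the
+# __length_hint__ protocol (PEP 424) even when it does not support len(),
+# which lets the progress bar below size itself:
+#
+#     class Feed(object):
+#         def __iter__(self):
+#             return iter(range(100))
+#         def __length_hint__(self):
+#             return 100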
+
+class ProgressBar(object):
+
+ def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
+ bar_template='%(bar)s', info_sep=' ', show_eta=True,
+ show_percent=None, show_pos=False, item_show_func=None,
+ label=None, file=None, color=None, width=30):
+ self.fill_char = fill_char
+ self.empty_char = empty_char
+ self.bar_template = bar_template
+ self.info_sep = info_sep
+ self.show_eta = show_eta
+ self.show_percent = show_percent
+ self.show_pos = show_pos
+ self.item_show_func = item_show_func
+ self.label = label or ''
+ if file is None:
+ file = _default_text_stdout()
+ self.file = file
+ self.color = color
+ self.width = width
+ self.autowidth = width == 0
+
+ if length is None:
+ length = _length_hint(iterable)
+ if iterable is None:
+ if length is None:
+ raise TypeError('iterable or length is required')
+ iterable = range_type(length)
+ self.iter = iter(iterable)
+ self.length = length
+ self.length_known = length is not None
+ self.pos = 0
+ self.avg = []
+ self.start = self.last_eta = time.time()
+ self.eta_known = False
+ self.finished = False
+ self.max_width = None
+ self.entered = False
+ self.current_item = None
+ self.is_hidden = not isatty(self.file)
+ self._last_line = None
+
+ def __enter__(self):
+ self.entered = True
+ self.render_progress()
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.render_finish()
+
+ def __iter__(self):
+ if not self.entered:
+ raise RuntimeError('You need to use progress bars in a with block.')
+ self.render_progress()
+ return self
+
+ def render_finish(self):
+ if self.is_hidden:
+ return
+ self.file.write(AFTER_BAR)
+ self.file.flush()
+
+ @property
+ def pct(self):
+ if self.finished:
+ return 1.0
+ return min(self.pos / (float(self.length) or 1), 1.0)
+
+ @property
+ def time_per_iteration(self):
+ if not self.avg:
+ return 0.0
+ return sum(self.avg) / float(len(self.avg))
+
+ @property
+ def eta(self):
+ if self.length_known and not self.finished:
+ return self.time_per_iteration * (self.length - self.pos)
+ return 0.0
+
+ def format_eta(self):
+ if self.eta_known:
+            t = int(self.eta) + 1
+            seconds = t % 60
+            t //= 60
+            minutes = t % 60
+            t //= 60
+            hours = t % 24
+            t //= 24
+ if t > 0:
+ days = t
+ return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
+ else:
+ return '%02d:%02d:%02d' % (hours, minutes, seconds)
+ return ''
+
+ def format_pos(self):
+ pos = str(self.pos)
+ if self.length_known:
+ pos += '/%s' % self.length
+ return pos
+
+ def format_pct(self):
+ return ('% 4d%%' % int(self.pct * 100))[1:]
+
+ def format_progress_line(self):
+ show_percent = self.show_percent
+
+ info_bits = []
+ if self.length_known:
+ bar_length = int(self.pct * self.width)
+ bar = self.fill_char * bar_length
+ bar += self.empty_char * (self.width - bar_length)
+ if show_percent is None:
+ show_percent = not self.show_pos
+ else:
+ if self.finished:
+ bar = self.fill_char * self.width
+ else:
+ bar = list(self.empty_char * (self.width or 1))
+ if self.time_per_iteration != 0:
+ bar[int((math.cos(self.pos * self.time_per_iteration)
+ / 2.0 + 0.5) * self.width)] = self.fill_char
+ bar = ''.join(bar)
+
+ if self.show_pos:
+ info_bits.append(self.format_pos())
+ if show_percent:
+ info_bits.append(self.format_pct())
+ if self.show_eta and self.eta_known and not self.finished:
+ info_bits.append(self.format_eta())
+ if self.item_show_func is not None:
+ item_info = self.item_show_func(self.current_item)
+ if item_info is not None:
+ info_bits.append(item_info)
+
+ return (self.bar_template % {
+ 'label': self.label,
+ 'bar': bar,
+ 'info': self.info_sep.join(info_bits)
+ }).rstrip()
+
+ def render_progress(self):
+ from .termui import get_terminal_size
+ nl = False
+
+ if self.is_hidden:
+ buf = [self.label]
+ nl = True
+ else:
+ buf = []
+ # Update width in case the terminal has been resized
+ if self.autowidth:
+ old_width = self.width
+ self.width = 0
+ clutter_length = term_len(self.format_progress_line())
+ new_width = max(0, get_terminal_size()[0] - clutter_length)
+ if new_width < old_width:
+ buf.append(BEFORE_BAR)
+ buf.append(' ' * self.max_width)
+ self.max_width = new_width
+ self.width = new_width
+
+ clear_width = self.width
+ if self.max_width is not None:
+ clear_width = self.max_width
+
+ buf.append(BEFORE_BAR)
+ line = self.format_progress_line()
+ line_len = term_len(line)
+ if self.max_width is None or self.max_width < line_len:
+ self.max_width = line_len
+ buf.append(line)
+
+ buf.append(' ' * (clear_width - line_len))
+ line = ''.join(buf)
+
+ # Render the line only if it changed.
+ if line != self._last_line:
+ self._last_line = line
+ echo(line, file=self.file, color=self.color, nl=nl)
+ self.file.flush()
+
+ def make_step(self, n_steps):
+ self.pos += n_steps
+ if self.length_known and self.pos >= self.length:
+ self.finished = True
+
+ if (time.time() - self.last_eta) < 1.0:
+ return
+
+ self.last_eta = time.time()
+        self.avg = self.avg[-6:] + [(time.time() - self.start) / self.pos]
+
+ self.eta_known = self.length_known
+
+ def update(self, n_steps):
+ self.make_step(n_steps)
+ self.render_progress()
+
+ def finish(self):
+        self.eta_known = False
+ self.current_item = None
+ self.finished = True
+
+ def next(self):
+ if self.is_hidden:
+ return next(self.iter)
+ try:
+ rv = next(self.iter)
+ self.current_item = rv
+ except StopIteration:
+ self.finish()
+ self.render_progress()
+ raise StopIteration()
+ else:
+ self.update(1)
+ return rv
+
+ if not PY2:
+ __next__ = next
+ del next
+
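+# Illustrative usage sketch (the public entry point is click.progressbar()):
+# the bar must be driven inside a with block, otherwise __iter__ raises.
+#
+#     with ProgressBar(range(100), label='crunching') as bar:
+#         for item in bar:
+#             pass  # per-item work goes here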
+
+def pager(text, color=None):
+ """Decide what method to use for paging through text."""
+ stdout = _default_text_stdout()
+ if not isatty(sys.stdin) or not isatty(stdout):
+ return _nullpager(stdout, text, color)
+ pager_cmd = (os.environ.get('PAGER', None) or '').strip()
+ if pager_cmd:
+ if WIN:
+ return _tempfilepager(text, pager_cmd, color)
+ return _pipepager(text, pager_cmd, color)
+ if os.environ.get('TERM') in ('dumb', 'emacs'):
+ return _nullpager(stdout, text, color)
+ if WIN or sys.platform.startswith('os2'):
+ return _tempfilepager(text, 'more <', color)
+ if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
+ return _pipepager(text, 'less', color)
+
+ import tempfile
+ fd, filename = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
+ return _pipepager(text, 'more', color)
+ return _nullpager(stdout, text, color)
+ finally:
+ os.unlink(filename)
+
+
+def _pipepager(text, cmd, color):
+ """Page through text by feeding it to another program. Invoking a
+ pager through this might support colors.
+ """
+ import subprocess
+ env = dict(os.environ)
+
+    # If we're piping to less we might support colors under the
+    # condition that the -r or -R flag is passed, either on the command
+    # line or through the LESS environment variable.
+ cmd_detail = cmd.rsplit('/', 1)[-1].split()
+ if color is None and cmd_detail[0] == 'less':
+ less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
+ if not less_flags:
+ env['LESS'] = '-R'
+ color = True
+ elif 'r' in less_flags or 'R' in less_flags:
+ color = True
+
+ if not color:
+ text = strip_ansi(text)
+
+ c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
+ env=env)
+ encoding = get_best_encoding(c.stdin)
+ try:
+ c.stdin.write(text.encode(encoding, 'replace'))
+ c.stdin.close()
+ except (IOError, KeyboardInterrupt):
+ pass
+
+ # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+ # search or other commands inside less).
+ #
+ # That means when the user hits ^C, the parent process (click) terminates,
+ # but less is still alive, paging the output and messing up the terminal.
+ #
+ # If the user wants to make the pager exit on ^C, they should set
+ # `LESS='-K'`. It's not our decision to make.
+ while True:
+ try:
+ c.wait()
+ except KeyboardInterrupt:
+ pass
+ else:
+ break
+
+
+def _tempfilepager(text, cmd, color):
+ """Page through text by invoking a program on a temporary file."""
+ import tempfile
+ filename = tempfile.mktemp()
+ if not color:
+ text = strip_ansi(text)
+ encoding = get_best_encoding(sys.stdout)
+ with open_stream(filename, 'wb')[0] as f:
+ f.write(text.encode(encoding))
+ try:
+ os.system(cmd + ' "' + filename + '"')
+ finally:
+ os.unlink(filename)
+
+
+def _nullpager(stream, text, color):
+ """Simply print unformatted text. This is the ultimate fallback."""
+ if not color:
+ text = strip_ansi(text)
+ stream.write(text)
+
+
+class Editor(object):
+
+ def __init__(self, editor=None, env=None, require_save=True,
+ extension='.txt'):
+ self.editor = editor
+ self.env = env
+ self.require_save = require_save
+ self.extension = extension
+
+ def get_editor(self):
+ if self.editor is not None:
+ return self.editor
+ for key in 'VISUAL', 'EDITOR':
+ rv = os.environ.get(key)
+ if rv:
+ return rv
+ if WIN:
+ return 'notepad'
+ for editor in 'vim', 'nano':
+ if os.system('which %s >/dev/null 2>&1' % editor) == 0:
+ return editor
+ return 'vi'
+
+ def edit_file(self, filename):
+ import subprocess
+ editor = self.get_editor()
+ if self.env:
+ environ = os.environ.copy()
+ environ.update(self.env)
+ else:
+ environ = None
+ try:
+ c = subprocess.Popen('%s "%s"' % (editor, filename),
+ env=environ, shell=True)
+ exit_code = c.wait()
+ if exit_code != 0:
+ raise ClickException('%s: Editing failed!' % editor)
+ except OSError as e:
+ raise ClickException('%s: Editing failed: %s' % (editor, e))
+
+ def edit(self, text):
+ import tempfile
+
+ text = text or ''
+ if text and not text.endswith('\n'):
+ text += '\n'
+
+ fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
+ try:
+ if WIN:
+ encoding = 'utf-8-sig'
+ text = text.replace('\n', '\r\n')
+ else:
+ encoding = 'utf-8'
+ text = text.encode(encoding)
+
+ f = os.fdopen(fd, 'wb')
+ f.write(text)
+ f.close()
+ timestamp = os.path.getmtime(name)
+
+ self.edit_file(name)
+
+ if self.require_save \
+ and os.path.getmtime(name) == timestamp:
+ return None
+
+ f = open(name, 'rb')
+ try:
+ rv = f.read()
+ finally:
+ f.close()
+ return rv.decode('utf-8-sig').replace('\r\n', '\n')
+ finally:
+ os.unlink(name)
+
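+# Illustrative usage sketch (the public wrapper is click.edit()):
+#
+#     rv = Editor(extension='.md').edit('# draft\n')
+#     if rv is None:
+#         pass  # the editor was closed without saving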
+
+def open_url(url, wait=False, locate=False):
+ import subprocess
+
+    def _unquote_file(url):
+        try:
+            from urllib.parse import unquote  # Python 3
+        except ImportError:
+            from urllib import unquote  # Python 2
+        if url.startswith('file://'):
+            url = unquote(url[7:])
+        return url
+
+ if sys.platform == 'darwin':
+ args = ['open']
+ if wait:
+ args.append('-W')
+ if locate:
+ args.append('-R')
+ args.append(_unquote_file(url))
+ null = open('/dev/null', 'w')
+ try:
+ return subprocess.Popen(args, stderr=null).wait()
+ finally:
+ null.close()
+ elif WIN:
+ if locate:
+ url = _unquote_file(url)
+ args = 'explorer /select,"%s"' % _unquote_file(
+ url.replace('"', ''))
+ else:
+ args = 'start %s "" "%s"' % (
+ wait and '/WAIT' or '', url.replace('"', ''))
+ return os.system(args)
+
+ try:
+ if locate:
+ url = os.path.dirname(_unquote_file(url)) or '.'
+ else:
+ url = _unquote_file(url)
+ c = subprocess.Popen(['xdg-open', url])
+ if wait:
+ return c.wait()
+ return 0
+ except OSError:
+ if url.startswith(('http://', 'https://')) and not locate and not wait:
+ import webbrowser
+ webbrowser.open(url)
+ return 0
+ return 1
+
+
+def _translate_ch_to_exc(ch):
+    # getchar() produces bytes on Windows (and on Python 2) and text
+    # elsewhere, so compare against both representations.
+    if ch in ('\x03', b'\x03'):
+        raise KeyboardInterrupt()
+    if ch in ('\x04', b'\x04'):
+        raise EOFError()
+
+
+if WIN:
+ import msvcrt
+
+    def getchar(echo):
+        rv = msvcrt.getch()
+        if echo:
+            # msvcrt has no putchar(); putch() writes a single byte.
+            msvcrt.putch(rv)
+        _translate_ch_to_exc(rv)
+        # Decode on both Python versions so callers always get text.
+        enc = getattr(sys.stdin, 'encoding', None) or 'cp1252'
+        return rv.decode(enc, 'replace')
+else:
+ import tty
+ import termios
+
+    def getchar(echo):
+        if not isatty(sys.stdin):
+            f = open('/dev/tty')
+            fd = f.fileno()
+        else:
+            fd = sys.stdin.fileno()
+            f = None
+        ch = b''
+        try:
+            old_settings = termios.tcgetattr(fd)
+            try:
+                tty.setraw(fd)
+                ch = os.read(fd, 32)
+                if echo and isatty(sys.stdout):
+                    # Text streams reject bytes on Python 3, so decode
+                    # before echoing.
+                    sys.stdout.write(
+                        ch.decode(get_best_encoding(sys.stdin), 'replace'))
+            finally:
+                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+                sys.stdout.flush()
+                if f is not None:
+                    f.close()
+        except termios.error:
+            pass
+        _translate_ch_to_exc(ch)
+        return ch.decode(get_best_encoding(sys.stdin), 'replace')
diff --git a/app/lib/click/_textwrap.py b/app/lib/click/_textwrap.py
new file mode 100644
index 0000000..7e77603
--- /dev/null
+++ b/app/lib/click/_textwrap.py
@@ -0,0 +1,38 @@
+import textwrap
+from contextlib import contextmanager
+
+
+class TextWrapper(textwrap.TextWrapper):
+
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ space_left = max(width - cur_len, 1)
+
+ if self.break_long_words:
+ last = reversed_chunks[-1]
+ cut = last[:space_left]
+ res = last[space_left:]
+ cur_line.append(cut)
+ reversed_chunks[-1] = res
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ @contextmanager
+ def extra_indent(self, indent):
+ old_initial_indent = self.initial_indent
+ old_subsequent_indent = self.subsequent_indent
+ self.initial_indent += indent
+ self.subsequent_indent += indent
+ try:
+ yield
+ finally:
+ self.initial_indent = old_initial_indent
+ self.subsequent_indent = old_subsequent_indent
+
+ def indent_only(self, text):
+ rv = []
+ for idx, line in enumerate(text.splitlines()):
+ indent = self.initial_indent
+ if idx > 0:
+ indent = self.subsequent_indent
+ rv.append(indent + line)
+ return '\n'.join(rv)
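+
+
+# Illustrative usage sketch of the helpers above:
+#
+#     wrapper = TextWrapper(width=30)
+#     with wrapper.extra_indent('    '):
+#         print(wrapper.fill('a long help text that needs wrapping'))
+#     print(wrapper.indent_only('already\nwrapped text'))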
diff --git a/app/lib/click/_unicodefun.py b/app/lib/click/_unicodefun.py
new file mode 100644
index 0000000..9e17a38
--- /dev/null
+++ b/app/lib/click/_unicodefun.py
@@ -0,0 +1,118 @@
+import os
+import sys
+import codecs
+
+from ._compat import PY2
+
+
+# If someone wants to vendor click, we want to ensure the
+# correct package is discovered. Ideally we could use a
+# relative import here but unfortunately Python does not
+# support that.
+click = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+def _find_unicode_literals_frame():
+ import __future__
+ frm = sys._getframe(1)
+ idx = 1
+ while frm is not None:
+ if frm.f_globals.get('__name__', '').startswith('click.'):
+ frm = frm.f_back
+ idx += 1
+ elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
+ return idx
+ else:
+ break
+ return 0
+
+
+def _check_for_unicode_literals():
+ if not __debug__:
+ return
+ if not PY2 or click.disable_unicode_literals_warning:
+ return
+ bad_frame = _find_unicode_literals_frame()
+ if bad_frame <= 0:
+ return
+ from warnings import warn
+ warn(Warning('Click detected the use of the unicode_literals '
+ '__future__ import. This is heavily discouraged '
+ 'because it can introduce subtle bugs in your '
+ 'code. You should instead use explicit u"" literals '
+ 'for your unicode strings. For more information see '
+ 'http://click.pocoo.org/python3/'),
+ stacklevel=bad_frame)
+
+
+def _verify_python3_env():
+ """Ensures that the environment is good for unicode on Python 3."""
+ if PY2:
+ return
+ try:
+ import locale
+ fs_enc = codecs.lookup(locale.getpreferredencoding()).name
+ except Exception:
+ fs_enc = 'ascii'
+ if fs_enc != 'ascii':
+ return
+
+ extra = ''
+ if os.name == 'posix':
+ import subprocess
+ rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE).communicate()[0]
+ good_locales = set()
+ has_c_utf8 = False
+
+ # Make sure we're operating on text here.
+ if isinstance(rv, bytes):
+ rv = rv.decode('ascii', 'replace')
+
+ for line in rv.splitlines():
+ locale = line.strip()
+ if locale.lower().endswith(('.utf-8', '.utf8')):
+ good_locales.add(locale)
+ if locale.lower() in ('c.utf8', 'c.utf-8'):
+ has_c_utf8 = True
+
+ extra += '\n\n'
+ if not good_locales:
+ extra += (
+ 'Additional information: on this system no suitable UTF-8\n'
+ 'locales were discovered. This most likely requires resolving\n'
+ 'by reconfiguring the locale system.'
+ )
+ elif has_c_utf8:
+ extra += (
+ 'This system supports the C.UTF-8 locale which is recommended.\n'
+ 'You might be able to resolve your issue by exporting the\n'
+ 'following environment variables:\n\n'
+ ' export LC_ALL=C.UTF-8\n'
+ ' export LANG=C.UTF-8'
+ )
+ else:
+ extra += (
+ 'This system lists a couple of UTF-8 supporting locales that\n'
+            'you can pick from. The following suitable locales were\n'
+ 'discovered: %s'
+ ) % ', '.join(sorted(good_locales))
+
+ bad_locale = None
+ for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
+ if locale and locale.lower().endswith(('.utf-8', '.utf8')):
+ bad_locale = locale
+ if locale is not None:
+ break
+ if bad_locale is not None:
+ extra += (
+ '\n\nClick discovered that you exported a UTF-8 locale\n'
+            'but the locale system could not pick it up because\n'
+            'it does not exist. The exported locale is "%s", but it\n'
+            'is not supported.'
+ ) % bad_locale
+
+ raise RuntimeError('Click will abort further execution because Python 3 '
+ 'was configured to use ASCII as encoding for the '
+ 'environment. Consult http://click.pocoo.org/python3/'
+                       ' for mitigation steps.' + extra)
diff --git a/app/lib/click/_winconsole.py b/app/lib/click/_winconsole.py
new file mode 100644
index 0000000..9aed942
--- /dev/null
+++ b/app/lib/click/_winconsole.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion to issue1602 in the Python bug tracker.
+#
+# There are some general differences in regards to how this works
+# compared to the original patches as we do not need to patch
+# the entire interpreter but just work in our little world of
+# echo and prompt.
+
+import io
+import os
+import sys
+import zlib
+import time
+import ctypes
+import msvcrt
+from click._compat import _NonClosingTextIOWrapper, text_type, PY2
+from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
+ c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
+try:
+ from ctypes import pythonapi
+ PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+ PyBuffer_Release = pythonapi.PyBuffer_Release
+except ImportError:
+ pythonapi = None
+from ctypes.wintypes import LPWSTR, LPCWSTR
+
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(
+ ('GetCommandLineW', windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(
+ POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+ ('CommandLineToArgvW', windll.shell32))
+
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b'\x1a'
+MAX_BYTES_WRITTEN = 32767
+
+
+class Py_buffer(ctypes.Structure):
+ _fields_ = [
+ ('buf', c_void_p),
+ ('obj', py_object),
+ ('len', c_ssize_t),
+ ('itemsize', c_ssize_t),
+ ('readonly', c_int),
+ ('ndim', c_int),
+ ('format', c_char_p),
+ ('shape', c_ssize_p),
+ ('strides', c_ssize_p),
+ ('suboffsets', c_ssize_p),
+ ('internal', c_void_p)
+ ]
+
+ if PY2:
+ _fields_.insert(-1, ('smalltable', c_ssize_t * 2))
+
+
+# On PyPy we cannot get buffers so our ability to operate here is
+# severely limited.
+if pythonapi is None:
+ get_buffer = None
+else:
+ def get_buffer(obj, writable=False):
+ buf = Py_buffer()
+ flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+ PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+ try:
+ buffer_type = c_char * buf.len
+ return buffer_type.from_address(buf.buf)
+ finally:
+ PyBuffer_Release(byref(buf))
+
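+# Illustrative sketch: get_buffer maps an object that supports the buffer
+# protocol onto a ctypes char array sharing the same memory:
+#
+#     data = bytearray(b'ab')
+#     buf = get_buffer(data, writable=True)
+#     buf[0] = b'X'   # mutates the underlying bytearray in place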
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+
+ def __init__(self, handle):
+ self.handle = handle
+
+ def isatty(self):
+ io.RawIOBase.isatty(self)
+ return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+
+ def readable(self):
+ return True
+
+ def readinto(self, b):
+ bytes_to_be_read = len(b)
+ if not bytes_to_be_read:
+ return 0
+ elif bytes_to_be_read % 2:
+ raise ValueError('cannot read odd number of bytes from '
+ 'UTF-16-LE encoded console')
+
+ buffer = get_buffer(b, writable=True)
+ code_units_to_be_read = bytes_to_be_read // 2
+ code_units_read = c_ulong()
+
+ rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
+ byref(code_units_read), None)
+ if GetLastError() == ERROR_OPERATION_ABORTED:
+ # wait for KeyboardInterrupt
+ time.sleep(0.1)
+ if not rv:
+ raise OSError('Windows error: %s' % GetLastError())
+
+ if buffer[0] == EOF:
+ return 0
+ return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+
+ def writable(self):
+ return True
+
+ @staticmethod
+ def _get_error_message(errno):
+ if errno == ERROR_SUCCESS:
+ return 'ERROR_SUCCESS'
+ elif errno == ERROR_NOT_ENOUGH_MEMORY:
+ return 'ERROR_NOT_ENOUGH_MEMORY'
+ return 'Windows error %s' % errno
+
+ def write(self, b):
+ bytes_to_be_written = len(b)
+ buf = get_buffer(b)
+ code_units_to_be_written = min(bytes_to_be_written,
+ MAX_BYTES_WRITTEN) // 2
+ code_units_written = c_ulong()
+
+ WriteConsoleW(self.handle, buf, code_units_to_be_written,
+ byref(code_units_written), None)
+ bytes_written = 2 * code_units_written.value
+
+ if bytes_written == 0 and bytes_to_be_written > 0:
+ raise OSError(self._get_error_message(GetLastError()))
+ return bytes_written
+
+
+class ConsoleStream(object):
+
+ def __init__(self, text_stream, byte_stream):
+ self._text_stream = text_stream
+ self.buffer = byte_stream
+
+ @property
+ def name(self):
+ return self.buffer.name
+
+ def write(self, x):
+ if isinstance(x, text_type):
+ return self._text_stream.write(x)
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(x)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __getattr__(self, name):
+ return getattr(self._text_stream, name)
+
+ def isatty(self):
+ return self.buffer.isatty()
+
+ def __repr__(self):
+        return '<ConsoleStream name=%r encoding=%r>' % (
+ self.name,
+ self.encoding,
+ )
+
+
+def _get_text_stdin(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+ 'utf-16-le', 'strict', line_buffering=True)
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stdout(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ _WindowsConsoleWriter(STDOUT_HANDLE),
+ 'utf-16-le', 'strict', line_buffering=True)
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stderr(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ _WindowsConsoleWriter(STDERR_HANDLE),
+ 'utf-16-le', 'strict', line_buffering=True)
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+if PY2:
+ def _hash_py_argv():
+ return zlib.crc32('\x00'.join(sys.argv[1:]))
+
+ _initial_argv_hash = _hash_py_argv()
+
+ def _get_windows_argv():
+ argc = c_int(0)
+ argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
+ argv = [argv_unicode[i] for i in range(0, argc.value)]
+
+ if not hasattr(sys, 'frozen'):
+ argv = argv[1:]
+ while len(argv) > 0:
+ arg = argv[0]
+ if not arg.startswith('-') or arg == '-':
+ break
+ argv = argv[1:]
+ if arg.startswith(('-c', '-m')):
+ break
+
+ return argv[1:]
+
+
+_stream_factories = {
+ 0: _get_text_stdin,
+ 1: _get_text_stdout,
+ 2: _get_text_stderr,
+}
+
+
+def _get_windows_console_stream(f, encoding, errors):
+ if get_buffer is not None and \
+ encoding in ('utf-16-le', None) \
+ and errors in ('strict', None) and \
+ hasattr(f, 'isatty') and f.isatty():
+ func = _stream_factories.get(f.fileno())
+ if func is not None:
+ if not PY2:
+                f = getattr(f, 'buffer', None)
+ if f is None:
+ return None
+ else:
+ # If we are on Python 2 we need to set the stream that we
+                # deal with to binary mode as otherwise the exercise is a
+ # bit moot. The same problems apply as for
+ # get_binary_stdin and friends from _compat.
+ msvcrt.setmode(f.fileno(), os.O_BINARY)
+ return func(f)
diff --git a/app/lib/click/core.py b/app/lib/click/core.py
new file mode 100644
index 0000000..7456451
--- /dev/null
+++ b/app/lib/click/core.py
@@ -0,0 +1,1744 @@
+import errno
+import os
+import sys
+from contextlib import contextmanager
+from itertools import repeat
+from functools import update_wrapper
+
+from .types import convert_type, IntRange, BOOL
+from .utils import make_str, make_default_short_help, echo, get_os_args
+from .exceptions import ClickException, UsageError, BadParameter, Abort, \
+ MissingParameter
+from .termui import prompt, confirm
+from .formatting import HelpFormatter, join_options
+from .parser import OptionParser, split_opt
+from .globals import push_context, pop_context
+
+from ._compat import PY2, isidentifier, iteritems
+from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
+
+
+_missing = object()
+
+
+SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
+SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
+
+
+def _bashcomplete(cmd, prog_name, complete_var=None):
+ """Internal handler for the bash completion support."""
+ if complete_var is None:
+ complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
+ complete_instr = os.environ.get(complete_var)
+ if not complete_instr:
+ return
+
+ from ._bashcomplete import bashcomplete
+ if bashcomplete(cmd, prog_name, complete_var, complete_instr):
+ sys.exit(1)
+
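+# Illustrative note: for a program named "foo" the completion machinery
+# activates when the shell exports something like _FOO_COMPLETE=source;
+# the exact instruction string is interpreted by _bashcomplete above.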
+
+def _check_multicommand(base_command, cmd_name, cmd, register=False):
+ if not base_command.chain or not isinstance(cmd, MultiCommand):
+ return
+ if register:
+ hint = 'It is not possible to add multi commands as children to ' \
+ 'another multi command that is in chain mode'
+ else:
+ hint = 'Found a multi command as subcommand to a multi command ' \
+ 'that is in chain mode. This is not supported'
+ raise RuntimeError('%s. Command "%s" is set to chain and "%s" was '
+                       'added as subcommand but is itself a '
+ 'multi command. ("%s" is a %s within a chained '
+ '%s named "%s"). This restriction was supposed to '
+ 'be lifted in 6.0 but the fix was flawed. This '
+ 'will be fixed in Click 7.0' % (
+ hint, base_command.name, cmd_name,
+ cmd_name, cmd.__class__.__name__,
+ base_command.__class__.__name__,
+ base_command.name))
+
+
+def batch(iterable, batch_size):
+ return list(zip(*repeat(iter(iterable), batch_size)))
+
+
+def invoke_param_callback(callback, ctx, param, value):
+ code = getattr(callback, '__code__', None)
+ args = getattr(code, 'co_argcount', 3)
+
+ if args < 3:
+ # This will become a warning in Click 3.0:
+ from warnings import warn
+ warn(Warning('Invoked legacy parameter callback "%s". The new '
+ 'signature for such callbacks starting with '
+ 'click 2.0 is (ctx, param, value).'
+ % callback), stacklevel=3)
+ return callback(ctx, value)
+ return callback(ctx, param, value)
+
+
+@contextmanager
+def augment_usage_errors(ctx, param=None):
+ """Context manager that attaches extra information to exceptions that
+    propagate out of parameter handling.
+ """
+ try:
+ yield
+ except BadParameter as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ if param is not None and e.param is None:
+ e.param = param
+ raise
+ except UsageError as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ raise
+
+
+def iter_params_for_processing(invocation_order, declaration_order):
+ """Given a sequence of parameters in the order as should be considered
+ for processing and an iterable of parameters that exist, this returns
+ a list in the correct order as they should be processed.
+ """
+ def sort_key(item):
+ try:
+ idx = invocation_order.index(item)
+ except ValueError:
+ idx = float('inf')
+ return (not item.is_eager, idx)
+
+ return sorted(declaration_order, key=sort_key)
+
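+# Illustrative sketch with hypothetical parameters: eager parameters come
+# first, then invocation order, then anything that never showed up.
+#
+#     iter_params_for_processing([opt_b, opt_a], [opt_a, opt_b, eager_opt])
+#     # -> [eager_opt, opt_b, opt_a]   (assuming only eager_opt.is_eager)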
+
+class Context(object):
+ """The context is a special internal object that holds state relevant
+ for the script execution at every single level. It's normally invisible
+ to commands unless they opt-in to getting access to it.
+
+ The context is useful as it can pass internal objects around and can
+ control special execution features such as reading data from
+ environment variables.
+
+ A context can be used as context manager in which case it will call
+ :meth:`close` on teardown.
+
+ .. versionadded:: 2.0
+ Added the `resilient_parsing`, `help_option_names`,
+ `token_normalize_func` parameters.
+
+ .. versionadded:: 3.0
+ Added the `allow_extra_args` and `allow_interspersed_args`
+ parameters.
+
+ .. versionadded:: 4.0
+ Added the `color`, `ignore_unknown_options`, and
+ `max_content_width` parameters.
+
+ :param command: the command class for this context.
+ :param parent: the parent context.
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it is usually
+ the name of the script, for commands below it it's
+                      the name of the command.
+ :param obj: an arbitrary object of user data.
+ :param auto_envvar_prefix: the prefix to use for automatic environment
+ variables. If this is `None` then reading
+ from environment variables is disabled. This
+ does not affect manually set environment
+ variables which are always read.
+ :param default_map: a dictionary (like object) with default values
+ for parameters.
+ :param terminal_width: the width of the terminal. The default is
+ inherit from parent context. If no context
+ defines the terminal width then auto
+ detection will be applied.
+ :param max_content_width: the maximum width for content rendered by
+ Click (this currently only affects help
+ pages). This defaults to 80 characters if
+ not overridden. In other words: even if the
+ terminal is larger than that, Click will not
+ format things wider than 80 characters by
+ default. In addition to that, formatters might
+                              add some safety margin on the right.
+ :param resilient_parsing: if this flag is enabled then Click will
+ parse without any interactivity or callback
+ invocation. This is useful for implementing
+ things such as completion support.
+ :param allow_extra_args: if this is set to `True` then extra arguments
+ at the end will not raise an error and will be
+ kept on the context. The default is to inherit
+ from the command.
+ :param allow_interspersed_args: if this is set to `False` then options
+ and arguments cannot be mixed. The
+ default is to inherit from the command.
+ :param ignore_unknown_options: instructs click to ignore options it does
+ not know and keeps them for later
+ processing.
+ :param help_option_names: optionally a list of strings that define how
+ the default help parameter is named. The
+ default is ``['--help']``.
+ :param token_normalize_func: an optional function that is used to
+ normalize tokens (options, choices,
+ etc.). This for instance can be used to
+ implement case insensitive behavior.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are used in texts that Click prints which is by
+ default not the case. This for instance would affect
+ help output.
+ """
+
+ def __init__(self, command, parent=None, info_name=None, obj=None,
+ auto_envvar_prefix=None, default_map=None,
+ terminal_width=None, max_content_width=None,
+ resilient_parsing=False, allow_extra_args=None,
+ allow_interspersed_args=None,
+ ignore_unknown_options=None, help_option_names=None,
+ token_normalize_func=None, color=None):
+ #: the parent context or `None` if none exists.
+ self.parent = parent
+ #: the :class:`Command` for this context.
+ self.command = command
+ #: the descriptive information name
+ self.info_name = info_name
+ #: the parsed parameters except if the value is hidden in which
+ #: case it's not remembered.
+ self.params = {}
+ #: the leftover arguments.
+ self.args = []
+ #: protected arguments. These are arguments that are prepended
+ #: to `args` when certain parsing scenarios are encountered but
+        #: must never be propagated to other arguments. This is used
+ #: to implement nested parsing.
+ self.protected_args = []
+ if obj is None and parent is not None:
+ obj = parent.obj
+ #: the user object stored.
+ self.obj = obj
+ self._meta = getattr(parent, 'meta', {})
+
+ #: A dictionary (-like object) with defaults for parameters.
+ if default_map is None \
+ and parent is not None \
+ and parent.default_map is not None:
+ default_map = parent.default_map.get(info_name)
+ self.default_map = default_map
+
+ #: This flag indicates if a subcommand is going to be executed. A
+ #: group callback can use this information to figure out if it's
+ #: being executed directly or because the execution flow passes
+ #: onwards to a subcommand. By default it's None, but it can be
+ #: the name of the subcommand to execute.
+ #:
+ #: If chaining is enabled this will be set to ``'*'`` in case
+ #: any commands are executed. It is however not possible to
+ #: figure out which ones. If you require this knowledge you
+ #: should use a :func:`resultcallback`.
+ self.invoked_subcommand = None
+
+ if terminal_width is None and parent is not None:
+ terminal_width = parent.terminal_width
+ #: The width of the terminal (None is autodetection).
+ self.terminal_width = terminal_width
+
+ if max_content_width is None and parent is not None:
+ max_content_width = parent.max_content_width
+ #: The maximum width of formatted content (None implies a sensible
+ #: default which is 80 for most things).
+ self.max_content_width = max_content_width
+
+ if allow_extra_args is None:
+ allow_extra_args = command.allow_extra_args
+ #: Indicates if the context allows extra args or if it should
+ #: fail on parsing.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_extra_args = allow_extra_args
+
+ if allow_interspersed_args is None:
+ allow_interspersed_args = command.allow_interspersed_args
+ #: Indicates if the context allows mixing of arguments and
+ #: options or not.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_interspersed_args = allow_interspersed_args
+
+ if ignore_unknown_options is None:
+ ignore_unknown_options = command.ignore_unknown_options
+ #: Instructs click to ignore options that a command does not
+        #: understand and to store them on the context for later
+ #: processing. This is primarily useful for situations where you
+ #: want to call into external programs. Generally this pattern is
+ #: strongly discouraged because it's not possibly to losslessly
+ #: forward all arguments.
+ #:
+ #: .. versionadded:: 4.0
+ self.ignore_unknown_options = ignore_unknown_options
+
+ if help_option_names is None:
+ if parent is not None:
+ help_option_names = parent.help_option_names
+ else:
+ help_option_names = ['--help']
+
+ #: The names for the help options.
+ self.help_option_names = help_option_names
+
+ if token_normalize_func is None and parent is not None:
+ token_normalize_func = parent.token_normalize_func
+
+ #: An optional normalization function for tokens. This is
+ #: options, choices, commands etc.
+ self.token_normalize_func = token_normalize_func
+
+ #: Indicates if resilient parsing is enabled. In that case Click
+ #: will do its best to not cause any failures.
+ self.resilient_parsing = resilient_parsing
+
+ # If there is no envvar prefix yet, but the parent has one and
+ # the command on this level has a name, we can expand the envvar
+ # prefix automatically.
+ if auto_envvar_prefix is None:
+ if parent is not None \
+ and parent.auto_envvar_prefix is not None and \
+ self.info_name is not None:
+                auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
+                                                self.info_name.upper())
+        else:
+            auto_envvar_prefix = auto_envvar_prefix.upper()
+ self.auto_envvar_prefix = auto_envvar_prefix
+
+ if color is None and parent is not None:
+ color = parent.color
+
+ #: Controls if styling output is wanted or not.
+ self.color = color
+
+ self._close_callbacks = []
+ self._depth = 0
+
+ def __enter__(self):
+ self._depth += 1
+ push_context(self)
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self._depth -= 1
+ if self._depth == 0:
+ self.close()
+ pop_context()
+
+ @contextmanager
+ def scope(self, cleanup=True):
+ """This helper method can be used with the context object to promote
+ it to the current thread local (see :func:`get_current_context`).
+ The default behavior of this is to invoke the cleanup functions which
+ can be disabled by setting `cleanup` to `False`. The cleanup
+ functions are typically used for things such as closing file handles.
+
+ If the cleanup is intended the context object can also be directly
+ used as a context manager.
+
+ Example usage::
+
+ with ctx.scope():
+ assert get_current_context() is ctx
+
+ This is equivalent::
+
+ with ctx:
+ assert get_current_context() is ctx
+
+ .. versionadded:: 5.0
+
+ :param cleanup: controls if the cleanup functions should be run or
+ not. The default is to run these functions. In
+ some situations the context only wants to be
+ temporarily pushed in which case this can be disabled.
+ Nested pushes automatically defer the cleanup.
+ """
+ if not cleanup:
+ self._depth += 1
+ try:
+ with self as rv:
+ yield rv
+ finally:
+ if not cleanup:
+ self._depth -= 1
+
+ @property
+ def meta(self):
+ """This is a dictionary which is shared with all the contexts
+        that are nested. It exists so that click utilities can store some
+ state here if they need to. It is however the responsibility of
+ that code to manage this dictionary well.
+
+ The keys are supposed to be unique dotted strings. For instance
+ module paths are a good choice for it. What is stored in there is
+ irrelevant for the operation of click. However what is important is
+ that code that places data here adheres to the general semantics of
+ the system.
+
+ Example usage::
+
+ LANG_KEY = __name__ + '.lang'
+
+ def set_language(value):
+ ctx = get_current_context()
+ ctx.meta[LANG_KEY] = value
+
+ def get_language():
+ return get_current_context().meta.get(LANG_KEY, 'en_US')
+
+ .. versionadded:: 5.0
+ """
+ return self._meta
+
+ def make_formatter(self):
+ """Creates the formatter for the help and usage output."""
+ return HelpFormatter(width=self.terminal_width,
+ max_width=self.max_content_width)
+
+ def call_on_close(self, f):
+ """This decorator remembers a function as callback that should be
+ executed when the context tears down. This is most useful to bind
+ resource handling to the script execution. For instance, file objects
+ opened by the :class:`File` type will register their close callbacks
+ here.
+
+ :param f: the function to execute on teardown.
+ """
+ self._close_callbacks.append(f)
+ return f
+
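+    # Illustrative sketch: bind a file handle's lifetime to the context
+    # (``fh`` is hypothetical):
+    #
+    #     fh = open('data.txt')
+    #     ctx.call_on_close(fh.close)
+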
+ def close(self):
+ """Invokes all close callbacks."""
+ for cb in self._close_callbacks:
+ cb()
+ self._close_callbacks = []
+
+ @property
+ def command_path(self):
+ """The computed command path. This is used for the ``usage``
+ information on the help page. It's automatically created by
+ combining the info names of the chain of contexts to the root.
+ """
+ rv = ''
+ if self.info_name is not None:
+ rv = self.info_name
+ if self.parent is not None:
+ rv = self.parent.command_path + ' ' + rv
+ return rv.lstrip()
+
+ def find_root(self):
+ """Finds the outermost context."""
+ node = self
+ while node.parent is not None:
+ node = node.parent
+ return node
+
+ def find_object(self, object_type):
+ """Finds the closest object of a given type."""
+ node = self
+ while node is not None:
+ if isinstance(node.obj, object_type):
+ return node.obj
+ node = node.parent
+
+ def ensure_object(self, object_type):
+ """Like :meth:`find_object` but sets the innermost object to a
+ new instance of `object_type` if it does not exist.
+ """
+ rv = self.find_object(object_type)
+ if rv is None:
+ self.obj = rv = object_type()
+ return rv
+
+ def lookup_default(self, name):
+ """Looks up the default for a parameter name. This by default
+ looks into the :attr:`default_map` if available.
+ """
+ if self.default_map is not None:
+ rv = self.default_map.get(name)
+ if callable(rv):
+ rv = rv()
+ return rv
+
+ def fail(self, message):
+ """Aborts the execution of the program with a specific error
+ message.
+
+ :param message: the error message to fail with.
+ """
+ raise UsageError(message, self)
+
+ def abort(self):
+ """Aborts the script."""
+ raise Abort()
+
+ def exit(self, code=0):
+ """Exits the application with a given exit code."""
+ sys.exit(code)
+
+ def get_usage(self):
+ """Helper method to get formatted usage string for the current
+ context and command.
+ """
+ return self.command.get_usage(self)
+
+ def get_help(self):
+ """Helper method to get formatted help page for the current
+ context and command.
+ """
+ return self.command.get_help(self)
+
+ def invoke(*args, **kwargs):
+ """Invokes a command callback in exactly the way it expects. There
+ are two ways to invoke this method:
+
+ 1. the first argument can be a callback and all other arguments and
+ keyword arguments are forwarded directly to the function.
+ 2. the first argument is a click command object. In that case all
+ arguments are forwarded as well but proper click parameters
+ (options and click arguments) must be keyword arguments and Click
+ will fill in defaults.
+
+ Note that before Click 3.2 keyword arguments were not properly filled
+ in against the intention of this code and no context was created. For
+ more information about this change and why it was done in a bugfix
+ release see :ref:`upgrade-to-3.2`.
+ """
+ self, callback = args[:2]
+ ctx = self
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback. In that case we also fill
+ # in defaults and make a new context for this command.
+ if isinstance(callback, Command):
+ other_cmd = callback
+ callback = other_cmd.callback
+ ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
+ if callback is None:
+ raise TypeError('The given command does not have a '
+ 'callback that can be invoked.')
+
+ for param in other_cmd.params:
+ if param.name not in kwargs and param.expose_value:
+ kwargs[param.name] = param.get_default(ctx)
+
+ args = args[2:]
+ with augment_usage_errors(self):
+ with ctx:
+ return callback(*args, **kwargs)
+
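+    # Illustrative sketch of both invocation styles (``plain_function``
+    # and ``other_cmd`` are hypothetical):
+    #
+    #     ctx.invoke(plain_function, 1, 2)      # forwarded verbatim
+    #     ctx.invoke(other_cmd, name='value')   # missing params get defaults
+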
+ def forward(*args, **kwargs):
+ """Similar to :meth:`invoke` but fills in default keyword
+ arguments from the current context if the other command expects
+ it. This cannot invoke callbacks directly, only other commands.
+ """
+ self, cmd = args[:2]
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback.
+ if not isinstance(cmd, Command):
+ raise TypeError('Callback is not a command.')
+
+ for param in self.params:
+ if param not in kwargs:
+ kwargs[param] = self.params[param]
+
+ return self.invoke(cmd, **kwargs)
+
+
+class BaseCommand(object):
+ """The base command implements the minimal API contract of commands.
+ Most code will never use this as it does not implement a lot of useful
+ functionality but it can act as the direct subclass of alternative
+ parsing methods that do not depend on the Click parser.
+
+ For instance, this can be used to bridge Click and other systems like
+ argparse or docopt.
+
+ Because base commands do not implement a lot of the API that other
+ parts of Click take for granted, they are not supported for all
+ operations. For instance, they cannot be used with the decorators
+ usually and they have no built-in callback system.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ """
+ #: the default for the :attr:`Context.allow_extra_args` flag.
+ allow_extra_args = False
+ #: the default for the :attr:`Context.allow_interspersed_args` flag.
+ allow_interspersed_args = True
+ #: the default for the :attr:`Context.ignore_unknown_options` flag.
+ ignore_unknown_options = False
+
+ def __init__(self, name, context_settings=None):
+ #: the name the command thinks it has. Upon registering a command
+ #: on a :class:`Group` the group will default the command name
+ #: with this information. You should instead use the
+ #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
+ self.name = name
+ if context_settings is None:
+ context_settings = {}
+ #: an optional dictionary with defaults passed to the context.
+ self.context_settings = context_settings
+
+ def get_usage(self, ctx):
+ raise NotImplementedError('Base commands cannot get usage')
+
+ def get_help(self, ctx):
+ raise NotImplementedError('Base commands cannot get help')
+
+ def make_context(self, info_name, args, parent=None, **extra):
+ """This function when given an info name and arguments will kick
+ off the parsing and create a new :class:`Context`. It does not
+ invoke the actual command callback though.
+
+        :param info_name: the info name for this invocation. Generally this
+                          is the most descriptive name for the script or
+                          command. For the toplevel script it's usually
+                          the name of the script, for commands below it it's
+                          the name of the command.
+ :param args: the arguments to parse as list of strings.
+ :param parent: the parent context if available.
+ :param extra: extra keyword arguments forwarded to the context
+ constructor.
+ """
+ for key, value in iteritems(self.context_settings):
+ if key not in extra:
+ extra[key] = value
+ ctx = Context(self, info_name=info_name, parent=parent, **extra)
+ with ctx.scope(cleanup=False):
+ self.parse_args(ctx, args)
+ return ctx
+
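+    # Illustrative sketch (``cli`` is a hypothetical command):
+    #
+    #     with cli.make_context('cli', ['--verbose']) as ctx:
+    #         cli.invoke(ctx)
+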
+ def parse_args(self, ctx, args):
+ """Given a context and a list of arguments this creates the parser
+ and parses the arguments, then modifies the context as necessary.
+ This is automatically invoked by :meth:`make_context`.
+ """
+ raise NotImplementedError('Base commands do not know how to parse '
+ 'arguments.')
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the command. The default
+ implementation is raising a not implemented error.
+ """
+ raise NotImplementedError('Base commands are not invokable by default')
+
+ def main(self, args=None, prog_name=None, complete_var=None,
+ standalone_mode=True, **extra):
+ """This is the way to invoke a script with all the bells and
+ whistles as a command line application. This will always terminate
+ the application after a call. If this is not wanted, ``SystemExit``
+ needs to be caught.
+
+ This method is also available by directly calling the instance of
+ a :class:`Command`.
+
+ .. versionadded:: 3.0
+ Added the `standalone_mode` flag to control the standalone mode.
+
+ :param args: the arguments that should be used for parsing. If not
+ provided, ``sys.argv[1:]`` is used.
+ :param prog_name: the program name that should be used. By default
+ the program name is constructed by taking the file
+ name from ``sys.argv[0]``.
+ :param complete_var: the environment variable that controls the
+ bash completion support. The default is
+                             ``"_<prog_name>_COMPLETE"`` with the prog name in
+ uppercase.
+ :param standalone_mode: the default behavior is to invoke the script
+ in standalone mode. Click will then
+ handle exceptions and convert them into
+ error messages and the function will never
+ return but shut down the interpreter. If
+ this is set to `False` they will be
+ propagated to the caller and the return
+ value of this function is the return value
+ of :meth:`invoke`.
+ :param extra: extra keyword arguments are forwarded to the context
+ constructor. See :class:`Context` for more information.
+ """
+ # If we are in Python 3, we will verify that the environment is
+        # sane at this point or reject further execution to avoid a
+ # broken script.
+ if not PY2:
+ _verify_python3_env()
+ else:
+ _check_for_unicode_literals()
+
+ if args is None:
+ args = get_os_args()
+ else:
+ args = list(args)
+
+ if prog_name is None:
+ prog_name = make_str(os.path.basename(
+ sys.argv and sys.argv[0] or __file__))
+
+ # Hook for the Bash completion. This only activates if the Bash
+ # completion is actually enabled, otherwise this is quite a fast
+ # noop.
+ _bashcomplete(self, prog_name, complete_var)
+
+ try:
+ try:
+ with self.make_context(prog_name, args, **extra) as ctx:
+ rv = self.invoke(ctx)
+ if not standalone_mode:
+ return rv
+ ctx.exit()
+ except (EOFError, KeyboardInterrupt):
+ echo(file=sys.stderr)
+ raise Abort()
+ except ClickException as e:
+ if not standalone_mode:
+ raise
+ e.show()
+ sys.exit(e.exit_code)
+ except IOError as e:
+ if e.errno == errno.EPIPE:
+ sys.exit(1)
+ else:
+ raise
+ except Abort:
+ if not standalone_mode:
+ raise
+ echo('Aborted!', file=sys.stderr)
+ sys.exit(1)
+
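+    # Illustrative sketch (``cli`` is a hypothetical command): with
+    # standalone_mode disabled, main() returns the callback's return value
+    # instead of exiting the interpreter.
+    #
+    #     result = cli.main(['--name', 'x'], standalone_mode=False)
+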
+ def __call__(self, *args, **kwargs):
+ """Alias for :meth:`main`."""
+ return self.main(*args, **kwargs)
+
+
+class Command(BaseCommand):
+ """Commands are the basic building block of command line interfaces in
+ Click. A basic command handles command line parsing and might dispatch
+ more parsing to commands nested below it.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ :param callback: the callback to invoke. This is optional.
+ :param params: the parameters to register with this command. This can
+ be either :class:`Option` or :class:`Argument` objects.
+ :param help: the help string to use for this command.
+ :param epilog: like the help string but it's printed at the end of the
+ help page after everything else.
+ :param short_help: the short help to use for this command. This is
+ shown on the command listing of the parent command.
+ :param add_help_option: by default each command registers a ``--help``
+ option. This can be disabled by this parameter.
+ """
+
+ def __init__(self, name, context_settings=None, callback=None,
+ params=None, help=None, epilog=None, short_help=None,
+ options_metavar='[OPTIONS]', add_help_option=True):
+ BaseCommand.__init__(self, name, context_settings)
+ #: the callback to execute when the command fires. This might be
+ #: `None` in which case nothing happens.
+ self.callback = callback
+ #: the list of parameters for this command in the order they
+ #: should show up in the help page and execute. Eager parameters
+ #: will automatically be handled before non eager ones.
+ self.params = params or []
+ self.help = help
+ self.epilog = epilog
+ self.options_metavar = options_metavar
+ if short_help is None and help:
+ short_help = make_default_short_help(help)
+ self.short_help = short_help
+ self.add_help_option = add_help_option
+
+ def get_usage(self, ctx):
+ formatter = ctx.make_formatter()
+ self.format_usage(ctx, formatter)
+ return formatter.getvalue().rstrip('\n')
+
+ def get_params(self, ctx):
+ rv = self.params
+ help_option = self.get_help_option(ctx)
+ if help_option is not None:
+ rv = rv + [help_option]
+ return rv
+
+ def format_usage(self, ctx, formatter):
+ """Writes the usage line into the formatter."""
+ pieces = self.collect_usage_pieces(ctx)
+ formatter.write_usage(ctx.command_path, ' '.join(pieces))
+
+ def collect_usage_pieces(self, ctx):
+ """Returns all the pieces that go into the usage line and returns
+ it as a list of strings.
+ """
+ rv = [self.options_metavar]
+ for param in self.get_params(ctx):
+ rv.extend(param.get_usage_pieces(ctx))
+ return rv
+
+ def get_help_option_names(self, ctx):
+ """Returns the names for the help option."""
+ all_names = set(ctx.help_option_names)
+ for param in self.params:
+ all_names.difference_update(param.opts)
+ all_names.difference_update(param.secondary_opts)
+ return all_names
+
+ def get_help_option(self, ctx):
+ """Returns the help option object."""
+ help_options = self.get_help_option_names(ctx)
+ if not help_options or not self.add_help_option:
+ return
+
+ def show_help(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+ return Option(help_options, is_flag=True,
+ is_eager=True, expose_value=False,
+ callback=show_help,
+ help='Show this message and exit.')
+
+ def make_parser(self, ctx):
+ """Creates the underlying option parser for this command."""
+ parser = OptionParser(ctx)
+ parser.allow_interspersed_args = ctx.allow_interspersed_args
+ parser.ignore_unknown_options = ctx.ignore_unknown_options
+ for param in self.get_params(ctx):
+ param.add_to_parser(parser, ctx)
+ return parser
+
+ def get_help(self, ctx):
+ """Formats the help into a string and returns it. This creates a
+        formatter and calls into :meth:`format_help`.
+ """
+ formatter = ctx.make_formatter()
+ self.format_help(ctx, formatter)
+ return formatter.getvalue().rstrip('\n')
+
+ def format_help(self, ctx, formatter):
+ """Writes the help into the formatter if it exists.
+
+ This calls into the following methods:
+
+ - :meth:`format_usage`
+ - :meth:`format_help_text`
+ - :meth:`format_options`
+ - :meth:`format_epilog`
+ """
+ self.format_usage(ctx, formatter)
+ self.format_help_text(ctx, formatter)
+ self.format_options(ctx, formatter)
+ self.format_epilog(ctx, formatter)
+
+ def format_help_text(self, ctx, formatter):
+ """Writes the help text to the formatter if it exists."""
+ if self.help:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(self.help)
+
+ def format_options(self, ctx, formatter):
+ """Writes all the options into the formatter if they exist."""
+ opts = []
+ for param in self.get_params(ctx):
+ rv = param.get_help_record(ctx)
+ if rv is not None:
+ opts.append(rv)
+
+ if opts:
+ with formatter.section('Options'):
+ formatter.write_dl(opts)
+
+ def format_epilog(self, ctx, formatter):
+ """Writes the epilog into the formatter if it exists."""
+ if self.epilog:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(self.epilog)
+
+ def parse_args(self, ctx, args):
+ parser = self.make_parser(ctx)
+ opts, args, param_order = parser.parse_args(args=args)
+
+ for param in iter_params_for_processing(
+ param_order, self.get_params(ctx)):
+ value, args = param.handle_parse_result(ctx, opts, args)
+
+ if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
+ ctx.fail('Got unexpected extra argument%s (%s)'
+ % (len(args) != 1 and 's' or '',
+ ' '.join(map(make_str, args))))
+
+ ctx.args = args
+ return args
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the attached callback (if it exists)
+ in the right way.
+ """
+ if self.callback is not None:
+ return ctx.invoke(self.callback, **ctx.params)
+
+
+class MultiCommand(Command):
+ """A multi command is the basic implementation of a command that
+ dispatches to subcommands. The most common version is the
+ :class:`Group`.
+
+ :param invoke_without_command: this controls how the multi command itself
+ is invoked. By default it's only invoked
+ if a subcommand is provided.
+    :param no_args_is_help: this controls what happens if no arguments are
+                            provided. It defaults to the inverse of
+                            `invoke_without_command`. If enabled, ``--help``
+                            is added as the argument when no arguments are
+                            passed.
+ :param subcommand_metavar: the string that is used in the documentation
+ to indicate the subcommand place.
+ :param chain: if this is set to `True` chaining of multiple subcommands
+ is enabled. This restricts the form of commands in that
+ they cannot have optional arguments but it allows
+ multiple commands to be chained together.
+ :param result_callback: the result callback to attach to this multi
+ command.
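+
+    Example of chain mode (a sketch; the subcommand names are
+    hypothetical)::
+
+        @click.group(chain=True)
+        def cli():
+            pass
+
+        @cli.command()
+        def build():
+            pass
+
+        @cli.command()
+        def deploy():
+            pass
+
+        # can now be invoked as: cli build deploy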
+ """
+ allow_extra_args = True
+ allow_interspersed_args = False
+
+ def __init__(self, name=None, invoke_without_command=False,
+ no_args_is_help=None, subcommand_metavar=None,
+ chain=False, result_callback=None, **attrs):
+ Command.__init__(self, name, **attrs)
+ if no_args_is_help is None:
+ no_args_is_help = not invoke_without_command
+ self.no_args_is_help = no_args_is_help
+ self.invoke_without_command = invoke_without_command
+ if subcommand_metavar is None:
+ if chain:
+ subcommand_metavar = SUBCOMMANDS_METAVAR
+ else:
+ subcommand_metavar = SUBCOMMAND_METAVAR
+ self.subcommand_metavar = subcommand_metavar
+ self.chain = chain
+ #: The result callback that is stored. This can be set or
+ #: overridden with the :func:`resultcallback` decorator.
+ self.result_callback = result_callback
+
+ if self.chain:
+ for param in self.params:
+ if isinstance(param, Argument) and not param.required:
+ raise RuntimeError('Multi commands in chain mode cannot '
+ 'have optional arguments.')
+
+ def collect_usage_pieces(self, ctx):
+ rv = Command.collect_usage_pieces(self, ctx)
+ rv.append(self.subcommand_metavar)
+ return rv
+
+ def format_options(self, ctx, formatter):
+ Command.format_options(self, ctx, formatter)
+ self.format_commands(ctx, formatter)
+
+ def resultcallback(self, replace=False):
+ """Adds a result callback to the chain command. By default if a
+ result callback is already registered this will chain them but
+ this can be disabled with the `replace` parameter. The result
+ callback is invoked with the return value of the subcommand
+ (or the list of return values from all subcommands if chaining
+ is enabled) as well as the parameters as they would be passed
+ to the main callback.
+
+ Example::
+
+ @click.group()
+ @click.option('-i', '--input', default=23)
+ def cli(input):
+ return 42
+
+ @cli.resultcallback()
+ def process_result(result, input):
+ return result + input
+
+ .. versionadded:: 3.0
+
+ :param replace: if set to `True` an already existing result
+ callback will be removed.
+ """
+ def decorator(f):
+ old_callback = self.result_callback
+ if old_callback is None or replace:
+ self.result_callback = f
+ return f
+ def function(__value, *args, **kwargs):
+ return f(old_callback(__value, *args, **kwargs),
+ *args, **kwargs)
+ self.result_callback = rv = update_wrapper(function, f)
+ return rv
+ return decorator
+
+ def format_commands(self, ctx, formatter):
+ """Extra format methods for multi methods that adds all the commands
+ after the options.
+ """
+ rows = []
+ for subcommand in self.list_commands(ctx):
+ cmd = self.get_command(ctx, subcommand)
+            # The command was listed but could not be resolved; skip it.
+ if cmd is None:
+ continue
+
+ help = cmd.short_help or ''
+ rows.append((subcommand, help))
+
+ if rows:
+ with formatter.section('Commands'):
+ formatter.write_dl(rows)
+
+ def parse_args(self, ctx, args):
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ rest = Command.parse_args(self, ctx, args)
+ if self.chain:
+ ctx.protected_args = rest
+ ctx.args = []
+ elif rest:
+ ctx.protected_args, ctx.args = rest[:1], rest[1:]
+
+ return ctx.args
+
+ def invoke(self, ctx):
+ def _process_result(value):
+ if self.result_callback is not None:
+ value = ctx.invoke(self.result_callback, value,
+ **ctx.params)
+ return value
+
+ if not ctx.protected_args:
+ # If we are invoked without command the chain flag controls
+ # how this happens. If we are not in chain mode, the return
+ # value here is the return value of the command.
+ # If however we are in chain mode, the return value is the
+ # return value of the result processor invoked with an empty
+ # list (which means that no subcommand actually was executed).
+ if self.invoke_without_command:
+ if not self.chain:
+ return Command.invoke(self, ctx)
+ with ctx:
+ Command.invoke(self, ctx)
+ return _process_result([])
+ ctx.fail('Missing command.')
+
+ # Fetch args back out
+ args = ctx.protected_args + ctx.args
+ ctx.args = []
+ ctx.protected_args = []
+
+ # If we're not in chain mode, we only allow the invocation of a
+ # single command but we also inform the current context about the
+ # name of the command to invoke.
+ if not self.chain:
+ # Make sure the context is entered so we do not clean up
+ # resources until the result processor has worked.
+ with ctx:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ ctx.invoked_subcommand = cmd_name
+ Command.invoke(self, ctx)
+ sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
+ with sub_ctx:
+ return _process_result(sub_ctx.command.invoke(sub_ctx))
+
+ # In chain mode we create the contexts step by step, but after the
+ # base command has been invoked. Because at that point we do not
+ # know the subcommands yet, the invoked subcommand attribute is
+ # set to ``*`` to inform the command that subcommands are executed
+ # but nothing else.
+ with ctx:
+ ctx.invoked_subcommand = args and '*' or None
+ Command.invoke(self, ctx)
+
+ # Otherwise we make every single context and invoke them in a
+ # chain. In that case the return value to the result processor
+ # is the list of all invoked subcommand's results.
+ contexts = []
+ while args:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False)
+ contexts.append(sub_ctx)
+ args, sub_ctx.args = sub_ctx.args, []
+
+ rv = []
+ for sub_ctx in contexts:
+ with sub_ctx:
+ rv.append(sub_ctx.command.invoke(sub_ctx))
+ return _process_result(rv)
+
+ def resolve_command(self, ctx, args):
+ cmd_name = make_str(args[0])
+ original_cmd_name = cmd_name
+
+ # Get the command
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we can't find the command but there is a normalization
+ # function available, we try with that one.
+ if cmd is None and ctx.token_normalize_func is not None:
+ cmd_name = ctx.token_normalize_func(cmd_name)
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we don't find the command we want to show an error message
+ # to the user that it was not provided. However, there is
+ # something else we should do: if the first argument looks like
+ # an option we want to kick off parsing again for arguments to
+ # resolve things like --help which now should go to the main
+ # place.
+ if cmd is None:
+ if split_opt(cmd_name)[0]:
+ self.parse_args(ctx, ctx.args)
+ ctx.fail('No such command "%s".' % original_cmd_name)
+
+ return cmd_name, cmd, args[1:]
+
+ def get_command(self, ctx, cmd_name):
+ """Given a context and a command name, this returns a
+ :class:`Command` object if it exists or returns `None`.
+ """
+ raise NotImplementedError()
+
+ def list_commands(self, ctx):
+ """Returns a list of subcommand names in the order they should
+ appear.
+ """
+ return []
+
+
+class Group(MultiCommand):
+ """A group allows a command to have subcommands attached. This is the
+ most common way to implement nesting in Click.
+
+ :param commands: a dictionary of commands.
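+
+    Example (the group and command names are illustrative)::
+
+        @click.group()
+        def cli():
+            pass
+
+        @cli.command()
+        def init():
+            click.echo('Initialized')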
+ """
+
+ def __init__(self, name=None, commands=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: the registered subcommands by their exported names.
+ self.commands = commands or {}
+
+ def add_command(self, cmd, name=None):
+ """Registers another :class:`Command` with this group. If the name
+ is not provided, the name of the command is used.
+ """
+ name = name or cmd.name
+ if name is None:
+ raise TypeError('Command has no name.')
+ _check_multicommand(self, name, cmd, register=True)
+ self.commands[name] = cmd
+
+ def command(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a command to
+ the group. This takes the same arguments as :func:`command` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ def decorator(f):
+ cmd = command(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+ return decorator
+
+ def group(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a group to
+ the group. This takes the same arguments as :func:`group` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ def decorator(f):
+ cmd = group(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+ return decorator
+
+ def get_command(self, ctx, cmd_name):
+ return self.commands.get(cmd_name)
+
+ def list_commands(self, ctx):
+ return sorted(self.commands)
+
+
+class CommandCollection(MultiCommand):
+ """A command collection is a multi command that merges multiple multi
+ commands together into one. This is a straightforward implementation
+ that accepts a list of different multi commands as sources and
+    provides all the commands from each of them.
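+
+    Example (a sketch; the group names are made up)::
+
+        @click.group()
+        def tools():
+            pass
+
+        @click.group()
+        def extras():
+            pass
+
+        cli = CommandCollection(sources=[tools, extras])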
+ """
+
+ def __init__(self, name=None, sources=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: The list of registered multi commands.
+ self.sources = sources or []
+
+ def add_source(self, multi_cmd):
+ """Adds a new multi command to the chain dispatcher."""
+ self.sources.append(multi_cmd)
+
+ def get_command(self, ctx, cmd_name):
+ for source in self.sources:
+ rv = source.get_command(ctx, cmd_name)
+ if rv is not None:
+ if self.chain:
+ _check_multicommand(self, cmd_name, rv)
+ return rv
+
+ def list_commands(self, ctx):
+ rv = set()
+ for source in self.sources:
+ rv.update(source.list_commands(ctx))
+ return sorted(rv)
+
+
+class Parameter(object):
+ """A parameter to a command comes in two versions: they are either
+ :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
+ not supported by design as some of the internals for parsing are
+ intentionally not finalized.
+
+ Some settings are supported by both options and arguments.
+
+ .. versionchanged:: 2.0
+ Changed signature for parameter callback to also be passed the
+ parameter. In Click 2.0, the old callback format will still work,
+       but it will raise a warning to give you a chance to migrate the
+       code more easily.
+
+ :param param_decls: the parameter declarations for this option or
+ argument. This is a list of flags or argument
+ names.
+ :param type: the type that should be used. Either a :class:`ParamType`
+                 or a Python type. The latter is converted into the former
+ automatically if supported.
+ :param required: controls if this is optional or not.
+ :param default: the default value if omitted. This can also be a callable,
+ in which case it's invoked when the default is needed
+ without any arguments.
+ :param callback: a callback that should be executed after the parameter
+ was matched. This is called as ``fn(ctx, param,
+ value)`` and needs to return the value. Before Click
+ 2.0, the signature was ``(ctx, value)``.
+ :param nargs: the number of arguments to match. If not ``1`` the return
+ value is a tuple instead of single value. The default for
+ nargs is ``1`` (except if the type is a tuple, then it's
+ the arity of the tuple).
+ :param metavar: how the value is represented in the help page.
+ :param expose_value: if this is `True` then the value is passed onwards
+ to the command callback and stored on the context,
+ otherwise it's skipped.
+ :param is_eager: eager values are processed before non eager ones. This
+                     should not be set for arguments or it will invert the
+ order of processing.
+ :param envvar: a string or list of strings that are environment variables
+ that should be checked.
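+
+    Example of a parameter callback using the ``fn(ctx, param, value)``
+    signature (an illustrative sketch)::
+
+        def validate(ctx, param, value):
+            if value < 0:
+                raise click.BadParameter('must be non-negative')
+            return value
+
+        @click.command()
+        @click.option('--count', default=1, callback=validate)
+        def repeat(count):
+            pass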
+ """
+ param_type_name = 'parameter'
+
+ def __init__(self, param_decls=None, type=None, required=False,
+ default=None, callback=None, nargs=None, metavar=None,
+ expose_value=True, is_eager=False, envvar=None):
+ self.name, self.opts, self.secondary_opts = \
+ self._parse_decls(param_decls or (), expose_value)
+
+ self.type = convert_type(type, default)
+
+ # Default nargs to what the type tells us if we have that
+ # information available.
+ if nargs is None:
+ if self.type.is_composite:
+ nargs = self.type.arity
+ else:
+ nargs = 1
+
+ self.required = required
+ self.callback = callback
+ self.nargs = nargs
+ self.multiple = False
+ self.expose_value = expose_value
+ self.default = default
+ self.is_eager = is_eager
+ self.metavar = metavar
+ self.envvar = envvar
+
+ @property
+ def human_readable_name(self):
+ """Returns the human readable name of this parameter. This is the
+ same as the name for options, but the metavar for arguments.
+ """
+ return self.name
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ metavar = self.type.get_metavar(self)
+ if metavar is None:
+ metavar = self.type.name.upper()
+ if self.nargs != 1:
+ metavar += '...'
+ return metavar
+
+ def get_default(self, ctx):
+ """Given a context variable this calculates the default value."""
+ # Otherwise go with the regular default.
+ if callable(self.default):
+ rv = self.default()
+ else:
+ rv = self.default
+ return self.type_cast_value(ctx, rv)
+
+ def add_to_parser(self, parser, ctx):
+ pass
+
+ def consume_value(self, ctx, opts):
+ value = opts.get(self.name)
+ if value is None:
+ value = ctx.lookup_default(self.name)
+ if value is None:
+ value = self.value_from_envvar(ctx)
+ return value
+
+ def type_cast_value(self, ctx, value):
+ """Given a value this runs it properly through the type system.
+ This automatically handles things like `nargs` and `multiple` as
+ well as composite types.
+ """
+ if self.type.is_composite:
+ if self.nargs <= 1:
+ raise TypeError('Attempted to invoke composite type '
+ 'but nargs has been set to %s. This is '
+ 'not supported; nargs needs to be set to '
+ 'a fixed value > 1.' % self.nargs)
+ if self.multiple:
+ return tuple(self.type(x or (), self, ctx) for x in value or ())
+ return self.type(value or (), self, ctx)
+
+ def _convert(value, level):
+ if level == 0:
+ return self.type(value, self, ctx)
+ return tuple(_convert(x, level - 1) for x in value or ())
+ return _convert(value, (self.nargs != 1) + bool(self.multiple))
+
+ def process_value(self, ctx, value):
+ """Given a value and context this runs the logic to convert the
+ value as necessary.
+ """
+ # If the value we were given is None we do nothing. This way
+ # code that calls this can easily figure out if something was
+ # not provided. Otherwise it would be converted into an empty
+ # tuple for multiple invocations which is inconvenient.
+ if value is not None:
+ return self.type_cast_value(ctx, value)
+
+ def value_is_missing(self, value):
+ if value is None:
+ return True
+ if (self.nargs != 1 or self.multiple) and value == ():
+ return True
+ return False
+
+ def full_process_value(self, ctx, value):
+ value = self.process_value(ctx, value)
+
+ if value is None:
+ value = self.get_default(ctx)
+
+ if self.required and self.value_is_missing(value):
+ raise MissingParameter(ctx=ctx, param=self)
+
+ return value
+
+ def resolve_envvar_value(self, ctx):
+ if self.envvar is None:
+ return
+ if isinstance(self.envvar, (tuple, list)):
+ for envvar in self.envvar:
+ rv = os.environ.get(envvar)
+ if rv is not None:
+ return rv
+ else:
+ return os.environ.get(self.envvar)
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is not None and self.nargs != 1:
+ rv = self.type.split_envvar_value(rv)
+ return rv
+
+ def handle_parse_result(self, ctx, opts, args):
+ with augment_usage_errors(ctx, param=self):
+ value = self.consume_value(ctx, opts)
+ try:
+ value = self.full_process_value(ctx, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+ value = None
+ if self.callback is not None:
+ try:
+ value = invoke_param_callback(
+ self.callback, ctx, self, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+
+ if self.expose_value:
+ ctx.params[self.name] = value
+ return value, args
+
+ def get_help_record(self, ctx):
+ pass
+
+ def get_usage_pieces(self, ctx):
+ return []
+
+
+class Option(Parameter):
+ """Options are usually optional values on the command line and
+ have some extra features that arguments don't have.
+
+ All other parameters are passed onwards to the parameter constructor.
+
+ :param show_default: controls if the default value should be shown on the
+ help page. Normally, defaults are not shown.
+ :param prompt: if set to `True` or a non empty string then the user will
+ be prompted for input if not set. If set to `True` the
+ prompt will be the option name capitalized.
+ :param confirmation_prompt: if set then the value will need to be confirmed
+ if it was prompted for.
+ :param hide_input: if this is `True` then the input on the prompt will be
+ hidden from the user. This is useful for password
+ input.
+ :param is_flag: forces this option to act as a flag. The default is
+ auto detection.
+ :param flag_value: which value should be used for this flag if it's
+ enabled. This is set to a boolean automatically if
+ the option string contains a slash to mark two options.
+ :param multiple: if this is set to `True` then the argument is accepted
+ multiple times and recorded. This is similar to ``nargs``
+                     in how it works but supports an arbitrary number of
+ arguments.
+ :param count: this flag makes an option increment an integer.
+ :param allow_from_autoenv: if this is enabled then the value of this
+ parameter will be pulled from an environment
+ variable in case a prefix is defined on the
+ context.
+ :param help: the help string.
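+
+    Example (a sketch; the option and command names are made up)::
+
+        @click.command()
+        @click.option('--shout/--no-shout', default=False)
+        @click.option('--name', prompt=True)
+        def greet(shout, name):
+            message = 'Hello %s!' % name
+            click.echo(message.upper() if shout else message)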
+ """
+ param_type_name = 'option'
+
+ def __init__(self, param_decls=None, show_default=False,
+ prompt=False, confirmation_prompt=False,
+ hide_input=False, is_flag=None, flag_value=None,
+ multiple=False, count=False, allow_from_autoenv=True,
+ type=None, help=None, **attrs):
+ default_is_missing = attrs.get('default', _missing) is _missing
+ Parameter.__init__(self, param_decls, type=type, **attrs)
+
+ if prompt is True:
+ prompt_text = self.name.replace('_', ' ').capitalize()
+ elif prompt is False:
+ prompt_text = None
+ else:
+ prompt_text = prompt
+ self.prompt = prompt_text
+ self.confirmation_prompt = confirmation_prompt
+ self.hide_input = hide_input
+
+ # Flags
+ if is_flag is None:
+ if flag_value is not None:
+ is_flag = True
+ else:
+ is_flag = bool(self.secondary_opts)
+ if is_flag and default_is_missing:
+ self.default = False
+ if flag_value is None:
+ flag_value = not self.default
+ self.is_flag = is_flag
+ self.flag_value = flag_value
+ if self.is_flag and isinstance(self.flag_value, bool) \
+ and type is None:
+ self.type = BOOL
+ self.is_bool_flag = True
+ else:
+ self.is_bool_flag = False
+
+ # Counting
+ self.count = count
+ if count:
+ if type is None:
+ self.type = IntRange(min=0)
+ if default_is_missing:
+ self.default = 0
+
+ self.multiple = multiple
+ self.allow_from_autoenv = allow_from_autoenv
+ self.help = help
+ self.show_default = show_default
+
+ # Sanity check for stuff we don't support
+ if __debug__:
+ if self.nargs < 0:
+ raise TypeError('Options cannot have nargs < 0')
+ if self.prompt and self.is_flag and not self.is_bool_flag:
+ raise TypeError('Cannot prompt for flags that are not bools.')
+ if not self.is_bool_flag and self.secondary_opts:
+ raise TypeError('Got secondary option for non boolean flag.')
+ if self.is_bool_flag and self.hide_input \
+ and self.prompt is not None:
+ raise TypeError('Hidden input does not work with boolean '
+ 'flag prompts.')
+ if self.count:
+ if self.multiple:
+ raise TypeError('Options cannot be multiple and count '
+ 'at the same time.')
+ elif self.is_flag:
+ raise TypeError('Options cannot be count and flags at '
+ 'the same time.')
+
+ def _parse_decls(self, decls, expose_value):
+ opts = []
+ secondary_opts = []
+ name = None
+ possible_names = []
+
+ for decl in decls:
+ if isidentifier(decl):
+ if name is not None:
+ raise TypeError('Name defined twice')
+ name = decl
+ else:
+ split_char = decl[:1] == '/' and ';' or '/'
+ if split_char in decl:
+ first, second = decl.split(split_char, 1)
+ first = first.rstrip()
+ if first:
+ possible_names.append(split_opt(first))
+ opts.append(first)
+ second = second.lstrip()
+ if second:
+ secondary_opts.append(second.lstrip())
+ else:
+ possible_names.append(split_opt(decl))
+ opts.append(decl)
+
+ if name is None and possible_names:
+ possible_names.sort(key=lambda x: len(x[0]))
+ name = possible_names[-1][1].replace('-', '_').lower()
+ if not isidentifier(name):
+ name = None
+
+ if name is None:
+ if not expose_value:
+ return None, opts, secondary_opts
+ raise TypeError('Could not determine name for option')
+
+ if not opts and not secondary_opts:
+ raise TypeError('No options defined but a name was passed (%s). '
+ 'Did you mean to declare an argument instead '
+ 'of an option?' % name)
+
+ return name, opts, secondary_opts
+
+ def add_to_parser(self, parser, ctx):
+ kwargs = {
+ 'dest': self.name,
+ 'nargs': self.nargs,
+ 'obj': self,
+ }
+
+ if self.multiple:
+ action = 'append'
+ elif self.count:
+ action = 'count'
+ else:
+ action = 'store'
+
+ if self.is_flag:
+ kwargs.pop('nargs', None)
+ if self.is_bool_flag and self.secondary_opts:
+ parser.add_option(self.opts, action=action + '_const',
+ const=True, **kwargs)
+ parser.add_option(self.secondary_opts, action=action +
+ '_const', const=False, **kwargs)
+ else:
+ parser.add_option(self.opts, action=action + '_const',
+ const=self.flag_value,
+ **kwargs)
+ else:
+ kwargs['action'] = action
+ parser.add_option(self.opts, **kwargs)
+
+ def get_help_record(self, ctx):
+ any_prefix_is_slash = []
+
+ def _write_opts(opts):
+ rv, any_slashes = join_options(opts)
+ if any_slashes:
+ any_prefix_is_slash[:] = [True]
+ if not self.is_flag and not self.count:
+ rv += ' ' + self.make_metavar()
+ return rv
+
+ rv = [_write_opts(self.opts)]
+ if self.secondary_opts:
+ rv.append(_write_opts(self.secondary_opts))
+
+ help = self.help or ''
+ extra = []
+ if self.default is not None and self.show_default:
+ extra.append('default: %s' % (
+ ', '.join('%s' % d for d in self.default)
+ if isinstance(self.default, (list, tuple))
+ else self.default, ))
+ if self.required:
+ extra.append('required')
+ if extra:
+ help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
+
+ return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
+
+ def get_default(self, ctx):
+        # If we're a non boolean flag our default is more complex because
+        # we need to look at all flags in the same group to figure out
+        # if we're the default one in which case we return the flag
+ # value as default.
+ if self.is_flag and not self.is_bool_flag:
+ for param in ctx.command.params:
+ if param.name == self.name and param.default:
+ return param.flag_value
+ return None
+ return Parameter.get_default(self, ctx)
+
+ def prompt_for_value(self, ctx):
+ """This is an alternative flow that can be activated in the full
+ value processing if a value does not exist. It will prompt the
+        user until a valid value exists and then return the processed
+        value as the result.
+ """
+ # Calculate the default before prompting anything to be stable.
+ default = self.get_default(ctx)
+
+ # If this is a prompt for a flag we need to handle this
+ # differently.
+ if self.is_bool_flag:
+ return confirm(self.prompt, default)
+
+ return prompt(self.prompt, default=default,
+ hide_input=self.hide_input,
+ confirmation_prompt=self.confirmation_prompt,
+ value_proc=lambda x: self.process_value(ctx, x))
+
+ def resolve_envvar_value(self, ctx):
+ rv = Parameter.resolve_envvar_value(self, ctx)
+ if rv is not None:
+ return rv
+ if self.allow_from_autoenv and \
+ ctx.auto_envvar_prefix is not None:
+ envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper())
+ return os.environ.get(envvar)
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is None:
+ return None
+ value_depth = (self.nargs != 1) + bool(self.multiple)
+ if value_depth > 0 and rv is not None:
+ rv = self.type.split_envvar_value(rv)
+ if self.multiple and self.nargs != 1:
+ rv = batch(rv, self.nargs)
+ return rv
+
+ def full_process_value(self, ctx, value):
+ if value is None and self.prompt is not None \
+ and not ctx.resilient_parsing:
+ return self.prompt_for_value(ctx)
+ return Parameter.full_process_value(self, ctx, value)
+
+
+class Argument(Parameter):
+ """Arguments are positional parameters to a command. They generally
+ provide fewer features than options but can have infinite ``nargs``
+ and are required by default.
+
+ All parameters are passed onwards to the parameter constructor.
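+
+    Example with a variadic argument (the names are illustrative)::
+
+        @click.command()
+        @click.argument('src', nargs=-1)
+        @click.argument('dst', nargs=1)
+        def copy(src, dst):
+            for fn in src:
+                click.echo('copy %s to %s' % (fn, dst))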
+ """
+ param_type_name = 'argument'
+
+ def __init__(self, param_decls, required=None, **attrs):
+ if required is None:
+ if attrs.get('default') is not None:
+ required = False
+ else:
+ required = attrs.get('nargs', 1) > 0
+ Parameter.__init__(self, param_decls, required=required, **attrs)
+ if self.default is not None and self.nargs < 0:
+ raise TypeError('nargs=-1 in combination with a default value '
+ 'is not supported.')
+
+ @property
+ def human_readable_name(self):
+ if self.metavar is not None:
+ return self.metavar
+ return self.name.upper()
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ var = self.name.upper()
+ if not self.required:
+ var = '[%s]' % var
+ if self.nargs != 1:
+ var += '...'
+ return var
+
+ def _parse_decls(self, decls, expose_value):
+ if not decls:
+ if not expose_value:
+ return None, [], []
+ raise TypeError('Could not determine name for argument')
+ if len(decls) == 1:
+ name = arg = decls[0]
+ name = name.replace('-', '_').lower()
+ elif len(decls) == 2:
+ name, arg = decls
+ else:
+ raise TypeError('Arguments take exactly one or two '
+ 'parameter declarations, got %d' % len(decls))
+ return name, [arg], []
+
+ def get_usage_pieces(self, ctx):
+ return [self.make_metavar()]
+
+ def add_to_parser(self, parser, ctx):
+ parser.add_argument(dest=self.name, nargs=self.nargs,
+ obj=self)
+
+
+# Circular dependency between decorators and core
+from .decorators import command, group
diff --git a/app/lib/click/decorators.py b/app/lib/click/decorators.py
new file mode 100644
index 0000000..9893452
--- /dev/null
+++ b/app/lib/click/decorators.py
@@ -0,0 +1,304 @@
+import sys
+import inspect
+
+from functools import update_wrapper
+
+from ._compat import iteritems
+from ._unicodefun import _check_for_unicode_literals
+from .utils import echo
+from .globals import get_current_context
+
+
+def pass_context(f):
+ """Marks a callback as wanting to receive the current context
+ object as first argument.
+ """
+ def new_func(*args, **kwargs):
+ return f(get_current_context(), *args, **kwargs)
+ return update_wrapper(new_func, f)
+
+
+def pass_obj(f):
+ """Similar to :func:`pass_context`, but only pass the object on the
+ context onwards (:attr:`Context.obj`). This is useful if that object
+ represents the state of a nested system.
+ """
+ def new_func(*args, **kwargs):
+ return f(get_current_context().obj, *args, **kwargs)
+ return update_wrapper(new_func, f)
+
+
+def make_pass_decorator(object_type, ensure=False):
+ """Given an object type this creates a decorator that will work
+ similar to :func:`pass_obj` but instead of passing the object of the
+ current context, it will find the innermost context of type
+ :func:`object_type`.
+
+ This generates a decorator that works roughly like this::
+
+ from functools import update_wrapper
+
+ def decorator(f):
+ @pass_context
+ def new_func(ctx, *args, **kwargs):
+ obj = ctx.find_object(object_type)
+ return ctx.invoke(f, obj, *args, **kwargs)
+ return update_wrapper(new_func, f)
+ return decorator
+
+ :param object_type: the type of the object to pass.
+ :param ensure: if set to `True`, a new object will be created and
+ remembered on the context if it's not there yet.
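+
+    Usage sketch (``Repo`` is a hypothetical state class)::
+
+        class Repo(object):
+            pass
+
+        pass_repo = make_pass_decorator(Repo, ensure=True)
+
+        @click.command()
+        @pass_repo
+        def show(repo):
+            pass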
+ """
+ def decorator(f):
+ def new_func(*args, **kwargs):
+ ctx = get_current_context()
+ if ensure:
+ obj = ctx.ensure_object(object_type)
+ else:
+ obj = ctx.find_object(object_type)
+ if obj is None:
+ raise RuntimeError('Managed to invoke callback without a '
+ 'context object of type %r existing'
+ % object_type.__name__)
+ return ctx.invoke(f, obj, *args[1:], **kwargs)
+ return update_wrapper(new_func, f)
+ return decorator
+
+
+def _make_command(f, name, attrs, cls):
+ if isinstance(f, Command):
+ raise TypeError('Attempted to convert a callback into a '
+ 'command twice.')
+ try:
+ params = f.__click_params__
+ params.reverse()
+ del f.__click_params__
+ except AttributeError:
+ params = []
+ help = attrs.get('help')
+ if help is None:
+ help = inspect.getdoc(f)
+ if isinstance(help, bytes):
+ help = help.decode('utf-8')
+ else:
+ help = inspect.cleandoc(help)
+ attrs['help'] = help
+ _check_for_unicode_literals()
+ return cls(name=name or f.__name__.lower(),
+ callback=f, params=params, **attrs)
+
+
+def command(name=None, cls=None, **attrs):
+ """Creates a new :class:`Command` and uses the decorated function as
+ callback. This will also automatically attach all decorated
+ :func:`option`\s and :func:`argument`\s as parameters to the command.
+
+ The name of the command defaults to the name of the function. If you
+ want to change that, you can pass the intended name as the first
+ argument.
+
+ All keyword arguments are forwarded to the underlying command class.
+
+ Once decorated the function turns into a :class:`Command` instance
+ that can be invoked as a command line utility or be attached to a
+ command :class:`Group`.
+
+ :param name: the name of the command. This defaults to the function
+ name.
+ :param cls: the command class to instantiate. This defaults to
+ :class:`Command`.
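+
+    Example overriding the command name (``do-sync`` is an illustrative
+    name)::
+
+        @click.command('do-sync')
+        def sync():
+            click.echo('Syncing')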
+ """
+ if cls is None:
+ cls = Command
+ def decorator(f):
+ cmd = _make_command(f, name, attrs, cls)
+ cmd.__doc__ = f.__doc__
+ return cmd
+ return decorator
+
+
+def group(name=None, **attrs):
+ """Creates a new :class:`Group` with a function as callback. This
+ works otherwise the same as :func:`command` just that the `cls`
+ parameter is set to :class:`Group`.
+ """
+ attrs.setdefault('cls', Group)
+ return command(name, **attrs)
+
+
+def _param_memo(f, param):
+ if isinstance(f, Command):
+ f.params.append(param)
+ else:
+ if not hasattr(f, '__click_params__'):
+ f.__click_params__ = []
+ f.__click_params__.append(param)
+
+
+def argument(*param_decls, **attrs):
+ """Attaches an argument to the command. All positional arguments are
+ passed as parameter declarations to :class:`Argument`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Argument` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the argument class to instantiate. This defaults to
+ :class:`Argument`.
+ """
+ def decorator(f):
+ ArgumentClass = attrs.pop('cls', Argument)
+ _param_memo(f, ArgumentClass(param_decls, **attrs))
+ return f
+ return decorator
+
+
+def option(*param_decls, **attrs):
+ """Attaches an option to the command. All positional arguments are
+ passed as parameter declarations to :class:`Option`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Option` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the option class to instantiate. This defaults to
+ :class:`Option`.
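+
+    Example of swapping in a custom class via ``cls`` (``MyOption`` is a
+    hypothetical subclass)::
+
+        class MyOption(click.Option):
+            pass
+
+        @click.command()
+        @click.option('--name', cls=MyOption)
+        def hello(name):
+            pass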
+ """
+ def decorator(f):
+ if 'help' in attrs:
+ attrs['help'] = inspect.cleandoc(attrs['help'])
+ OptionClass = attrs.pop('cls', Option)
+ _param_memo(f, OptionClass(param_decls, **attrs))
+ return f
+ return decorator
+
+
+def confirmation_option(*param_decls, **attrs):
+ """Shortcut for confirmation prompts that can be ignored by passing
+    ``--yes`` as a parameter.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+
+ @click.command()
+ @click.option('--yes', is_flag=True, callback=callback,
+ expose_value=False, prompt='Do you want to continue?')
+ def dropdb():
+ pass
+ """
+ def decorator(f):
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+ attrs.setdefault('is_flag', True)
+ attrs.setdefault('callback', callback)
+ attrs.setdefault('expose_value', False)
+ attrs.setdefault('prompt', 'Do you want to continue?')
+ attrs.setdefault('help', 'Confirm the action without prompting.')
+ return option(*(param_decls or ('--yes',)), **attrs)(f)
+ return decorator
+
+
+def password_option(*param_decls, **attrs):
+ """Shortcut for password prompts.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ @click.command()
+ @click.option('--password', prompt=True, confirmation_prompt=True,
+ hide_input=True)
+ def changeadmin(password):
+ pass
+ """
+ def decorator(f):
+ attrs.setdefault('prompt', True)
+ attrs.setdefault('confirmation_prompt', True)
+ attrs.setdefault('hide_input', True)
+ return option(*(param_decls or ('--password',)), **attrs)(f)
+ return decorator
+
+
+def version_option(version=None, *param_decls, **attrs):
+ """Adds a ``--version`` option which immediately ends the program
+ printing out the version number. This is implemented as an eager
+ option that prints the version and exits the program in the callback.
+
+ :param version: the version number to show. If not provided Click
+ attempts an auto discovery via setuptools.
+ :param prog_name: the name of the program (defaults to autodetection)
+ :param message: custom message to show instead of the default
+ (``'%(prog)s, version %(version)s'``)
+ :param others: everything else is forwarded to :func:`option`.
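+
+    Example (the version string is illustrative)::
+
+        @click.command()
+        @click.version_option(version='1.0.0')
+        def cli():
+            pass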
+ """
+ if version is None:
+ module = sys._getframe(1).f_globals.get('__name__')
+ def decorator(f):
+ prog_name = attrs.pop('prog_name', None)
+ message = attrs.pop('message', '%(prog)s, version %(version)s')
+
+ def callback(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+ prog = prog_name
+ if prog is None:
+ prog = ctx.find_root().info_name
+ ver = version
+ if ver is None:
+ try:
+ import pkg_resources
+ except ImportError:
+ pass
+ else:
+ for dist in pkg_resources.working_set:
+ scripts = dist.get_entry_map().get('console_scripts') or {}
+ for script_name, entry_point in iteritems(scripts):
+ if entry_point.module_name == module:
+ ver = dist.version
+ break
+ if ver is None:
+ raise RuntimeError('Could not determine version')
+ echo(message % {
+ 'prog': prog,
+ 'version': ver,
+ }, color=ctx.color)
+ ctx.exit()
+
+ attrs.setdefault('is_flag', True)
+ attrs.setdefault('expose_value', False)
+ attrs.setdefault('is_eager', True)
+ attrs.setdefault('help', 'Show the version and exit.')
+ attrs['callback'] = callback
+ return option(*(param_decls or ('--version',)), **attrs)(f)
+ return decorator
+
+
+def help_option(*param_decls, **attrs):
+ """Adds a ``--help`` option which immediately ends the program
+    printing out the help page. It is usually unnecessary to add this,
+    as it is added by default to all commands unless suppressed.
+
+    Like :func:`version_option`, this is implemented as an eager option
+    that prints the help page in the callback and exits.
+
+ All arguments are forwarded to :func:`option`.
+ """
+ def decorator(f):
+ def callback(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+ attrs.setdefault('is_flag', True)
+ attrs.setdefault('expose_value', False)
+ attrs.setdefault('help', 'Show this message and exit.')
+ attrs.setdefault('is_eager', True)
+ attrs['callback'] = callback
+ return option(*(param_decls or ('--help',)), **attrs)(f)
+ return decorator
+
+
+# Circular dependencies between core and decorators
+from .core import Command, Group, Argument, Option
diff --git a/app/lib/click/exceptions.py b/app/lib/click/exceptions.py
new file mode 100644
index 0000000..74a4542
--- /dev/null
+++ b/app/lib/click/exceptions.py
@@ -0,0 +1,201 @@
+from ._compat import PY2, filename_to_ui, get_text_stderr
+from .utils import echo
+
+
+class ClickException(Exception):
+ """An exception that Click can handle and show to the user."""
+
+ #: The exit code for this exception
+ exit_code = 1
+
+ def __init__(self, message):
+ if PY2:
+ if message is not None:
+ message = message.encode('utf-8')
+ Exception.__init__(self, message)
+ self.message = message
+
+ def format_message(self):
+ return self.message
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ echo('Error: %s' % self.format_message(), file=file)
+
+
+class UsageError(ClickException):
+ """An internal exception that signals a usage error. This typically
+ aborts any further handling.
+
+ :param message: the error message to display.
+ :param ctx: optionally the context that caused this error. Click will
+ fill in the context automatically in some situations.
+ """
+ exit_code = 2
+
+ def __init__(self, message, ctx=None):
+ ClickException.__init__(self, message)
+ self.ctx = ctx
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ color = None
+ if self.ctx is not None:
+ color = self.ctx.color
+ echo(self.ctx.get_usage() + '\n', file=file, color=color)
+ echo('Error: %s' % self.format_message(), file=file, color=color)
+
+
+class BadParameter(UsageError):
+ """An exception that formats out a standardized error message for a
+ bad parameter. This is useful when thrown from a callback or type as
+ Click will attach contextual information to it (for instance, which
+ parameter it is).
+
+ .. versionadded:: 2.0
+
+ :param param: the parameter object that caused this error. This can
+ be left out, and Click will attach this info itself
+ if possible.
+    :param param_hint: a string that shows up as the parameter name. This
+                       can be used as an alternative to `param` in cases
+                       where custom validation should happen. If it is
+                       a string it's used as such; if it's a list then
+ each item is quoted and separated.
+ """
+
+ def __init__(self, message, ctx=None, param=None,
+ param_hint=None):
+ UsageError.__init__(self, message, ctx)
+ self.param = param
+ self.param_hint = param_hint
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.opts or [self.param.human_readable_name]
+ else:
+ return 'Invalid value: %s' % self.message
+ if isinstance(param_hint, (tuple, list)):
+ param_hint = ' / '.join('"%s"' % x for x in param_hint)
+ return 'Invalid value for %s: %s' % (param_hint, self.message)
+
+
+class MissingParameter(BadParameter):
+ """Raised if click required an option or argument but it was not
+ provided when invoking the script.
+
+ .. versionadded:: 4.0
+
+ :param param_type: a string that indicates the type of the parameter.
+ The default is to inherit the parameter type from
+ the given `param`. Valid values are ``'parameter'``,
+ ``'option'`` or ``'argument'``.
+ """
+
+ def __init__(self, message=None, ctx=None, param=None,
+ param_hint=None, param_type=None):
+ BadParameter.__init__(self, message, ctx, param, param_hint)
+ self.param_type = param_type
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.opts or [self.param.human_readable_name]
+ else:
+ param_hint = None
+ if isinstance(param_hint, (tuple, list)):
+ param_hint = ' / '.join('"%s"' % x for x in param_hint)
+
+ param_type = self.param_type
+ if param_type is None and self.param is not None:
+ param_type = self.param.param_type_name
+
+ msg = self.message
+ if self.param is not None:
+ msg_extra = self.param.type.get_missing_message(self.param)
+ if msg_extra:
+ if msg:
+ msg += '. ' + msg_extra
+ else:
+ msg = msg_extra
+
+ return 'Missing %s%s%s%s' % (
+ param_type,
+ param_hint and ' %s' % param_hint or '',
+ msg and '. ' or '.',
+ msg or '',
+ )
+
+
+class NoSuchOption(UsageError):
+ """Raised if click attempted to handle an option that does not
+ exist.
+
+ .. versionadded:: 4.0
+ """
+
+ def __init__(self, option_name, message=None, possibilities=None,
+ ctx=None):
+ if message is None:
+ message = 'no such option: %s' % option_name
+ UsageError.__init__(self, message, ctx)
+ self.option_name = option_name
+ self.possibilities = possibilities
+
+ def format_message(self):
+ bits = [self.message]
+ if self.possibilities:
+ if len(self.possibilities) == 1:
+ bits.append('Did you mean %s?' % self.possibilities[0])
+ else:
+ possibilities = sorted(self.possibilities)
+ bits.append('(Possible options: %s)' % ', '.join(possibilities))
+ return ' '.join(bits)
+
+
+class BadOptionUsage(UsageError):
+ """Raised if an option is generally supplied but the use of the option
+ was incorrect. This is for instance raised if the number of arguments
+ for an option is not correct.
+
+ .. versionadded:: 4.0
+ """
+
+ def __init__(self, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+
+
+class BadArgumentUsage(UsageError):
+ """Raised if an argument is generally supplied but the use of the argument
+ was incorrect. This is for instance raised if the number of values
+ for an argument is not correct.
+
+ .. versionadded:: 6.0
+ """
+
+ def __init__(self, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+
+
+class FileError(ClickException):
+ """Raised if a file cannot be opened."""
+
+ def __init__(self, filename, hint=None):
+ ui_filename = filename_to_ui(filename)
+ if hint is None:
+ hint = 'unknown error'
+ ClickException.__init__(self, hint)
+ self.ui_filename = ui_filename
+ self.filename = filename
+
+ def format_message(self):
+ return 'Could not open file %s: %s' % (self.ui_filename, self.message)
+
+
+class Abort(RuntimeError):
+ """An internal signalling exception that signals Click to abort."""
diff --git a/app/lib/click/formatting.py b/app/lib/click/formatting.py
new file mode 100644
index 0000000..a3d6a4d
--- /dev/null
+++ b/app/lib/click/formatting.py
@@ -0,0 +1,256 @@
+from contextlib import contextmanager
+from .termui import get_terminal_size
+from .parser import split_opt
+from ._compat import term_len
+
+
+# Can force a width. This is used by the test system.
+FORCED_WIDTH = None
+
+
+def measure_table(rows):
+ widths = {}
+ for row in rows:
+ for idx, col in enumerate(row):
+ widths[idx] = max(widths.get(idx, 0), term_len(col))
+ return tuple(y for x, y in sorted(widths.items()))
+
+
+def iter_rows(rows, col_count):
+ for row in rows:
+ row = tuple(row)
+ yield row + ('',) * (col_count - len(row))
+
+
+def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
+ preserve_paragraphs=False):
+ """A helper function that intelligently wraps text. By default, it
+ assumes that it operates on a single paragraph of text but if the
+ `preserve_paragraphs` parameter is provided it will intelligently
+    handle paragraphs (separated by empty lines).
+
+ If paragraphs are handled, a paragraph can be prefixed with an empty
+ line containing the ``\\b`` character (``\\x08``) to indicate that
+ no rewrapping should happen in that block.
+
+ :param text: the text that should be rewrapped.
+ :param width: the maximum width for the text.
+ :param initial_indent: the initial indent that should be placed on the
+ first line as a string.
+ :param subsequent_indent: the indent string that should be placed on
+ each consecutive line.
+ :param preserve_paragraphs: if this flag is set then the wrapping will
+ intelligently handle paragraphs.
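+
+    Illustrative call (the text and width are example values)::
+
+        wrap_text('A long paragraph that should be rewrapped to the '
+                  'given width.', width=40, subsequent_indent='  ')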
+ """
+ from ._textwrap import TextWrapper
+ text = text.expandtabs()
+ wrapper = TextWrapper(width, initial_indent=initial_indent,
+ subsequent_indent=subsequent_indent,
+ replace_whitespace=False)
+ if not preserve_paragraphs:
+ return wrapper.fill(text)
+
+ p = []
+ buf = []
+ indent = None
+
+ def _flush_par():
+ if not buf:
+ return
+ if buf[0].strip() == '\b':
+ p.append((indent or 0, True, '\n'.join(buf[1:])))
+ else:
+ p.append((indent or 0, False, ' '.join(buf)))
+ del buf[:]
+
+ for line in text.splitlines():
+ if not line:
+ _flush_par()
+ indent = None
+ else:
+ if indent is None:
+ orig_len = term_len(line)
+ line = line.lstrip()
+ indent = orig_len - term_len(line)
+ buf.append(line)
+ _flush_par()
+
+ rv = []
+ for indent, raw, text in p:
+ with wrapper.extra_indent(' ' * indent):
+ if raw:
+ rv.append(wrapper.indent_only(text))
+ else:
+ rv.append(wrapper.fill(text))
+
+ return '\n\n'.join(rv)
+
+
+class HelpFormatter(object):
+ """This class helps with formatting text-based help pages. It's
+ usually just needed for very special internal cases, but it's also
+ exposed so that developers can write their own fancy outputs.
+
+ At present, it always writes into memory.
+
+ :param indent_increment: the additional increment for each level.
+ :param width: the width for the text. This defaults to the terminal
+ width clamped to a maximum of 78.
+ """
+
+ def __init__(self, indent_increment=2, width=None, max_width=None):
+ self.indent_increment = indent_increment
+ if max_width is None:
+ max_width = 80
+ if width is None:
+ width = FORCED_WIDTH
+ if width is None:
+ width = max(min(get_terminal_size()[0], max_width) - 2, 50)
+ self.width = width
+ self.current_indent = 0
+ self.buffer = []
+
+ def write(self, string):
+ """Writes a unicode string into the internal buffer."""
+ self.buffer.append(string)
+
+ def indent(self):
+ """Increases the indentation."""
+ self.current_indent += self.indent_increment
+
+ def dedent(self):
+ """Decreases the indentation."""
+ self.current_indent -= self.indent_increment
+
+ def write_usage(self, prog, args='', prefix='Usage: '):
+ """Writes a usage line into the buffer.
+
+ :param prog: the program name.
+ :param args: whitespace separated list of arguments.
+ :param prefix: the prefix for the first line.
+ """
+ usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
+ text_width = self.width - self.current_indent
+
+ if text_width >= (term_len(usage_prefix) + 20):
+ # The arguments will fit to the right of the prefix.
+ indent = ' ' * term_len(usage_prefix)
+ self.write(wrap_text(args, text_width,
+ initial_indent=usage_prefix,
+ subsequent_indent=indent))
+ else:
+ # The prefix is too long, put the arguments on the next line.
+ self.write(usage_prefix)
+ self.write('\n')
+ indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
+ self.write(wrap_text(args, text_width,
+ initial_indent=indent,
+ subsequent_indent=indent))
+
+ self.write('\n')
+
+ def write_heading(self, heading):
+ """Writes a heading into the buffer."""
+ self.write('%*s%s:\n' % (self.current_indent, '', heading))
+
+ def write_paragraph(self):
+ """Writes a paragraph into the buffer."""
+ if self.buffer:
+ self.write('\n')
+
+ def write_text(self, text):
+ """Writes re-indented text into the buffer. This rewraps and
+ preserves paragraphs.
+ """
+ text_width = max(self.width - self.current_indent, 11)
+ indent = ' ' * self.current_indent
+ self.write(wrap_text(text, text_width,
+ initial_indent=indent,
+ subsequent_indent=indent,
+ preserve_paragraphs=True))
+ self.write('\n')
+
+ def write_dl(self, rows, col_max=30, col_spacing=2):
+ """Writes a definition list into the buffer. This is how options
+ and commands are usually formatted.
+
+ :param rows: a list of two item tuples for the terms and values.
+ :param col_max: the maximum width of the first column.
+ :param col_spacing: the number of spaces between the first and
+ second column.
+ """
+ rows = list(rows)
+ widths = measure_table(rows)
+ if len(widths) != 2:
+ raise TypeError('Expected two columns for definition list')
+
+ first_col = min(widths[0], col_max) + col_spacing
+
+ for first, second in iter_rows(rows, len(widths)):
+ self.write('%*s%s' % (self.current_indent, '', first))
+ if not second:
+ self.write('\n')
+ continue
+ if term_len(first) <= first_col - col_spacing:
+ self.write(' ' * (first_col - term_len(first)))
+ else:
+ self.write('\n')
+ self.write(' ' * (first_col + self.current_indent))
+
+ text_width = max(self.width - first_col - 2, 10)
+            lines = wrap_text(second, text_width).splitlines()
+            if lines:
+                self.write(lines[0] + '\n')
+                for line in lines[1:]:
+                    self.write('%*s%s\n' % (
+                        first_col + self.current_indent, '', line))
+            else:
+                self.write('\n')
+
+ @contextmanager
+ def section(self, name):
+ """Helpful context manager that writes a paragraph, a heading,
+ and the indents.
+
+ :param name: the section name that is written as heading.
+ """
+ self.write_paragraph()
+ self.write_heading(name)
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ @contextmanager
+ def indentation(self):
+ """A context manager that increases the indentation."""
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ def getvalue(self):
+ """Returns the buffer contents."""
+ return ''.join(self.buffer)
+
+
+def join_options(options):
+ """Given a list of option strings this joins them in the most appropriate
+ way and returns them in the form ``(formatted_string,
+ any_prefix_is_slash)`` where the second item in the tuple is a flag that
+ indicates if any of the option prefixes was a slash.
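+
+    Illustrative behavior (example values)::
+
+        join_options(['--foo', '-f'])  # -> ('-f, --foo', False)
+        join_options(['/debug'])       # -> ('/debug', True)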
+ """
+ rv = []
+ any_prefix_is_slash = False
+ for opt in options:
+ prefix = split_opt(opt)[0]
+ if prefix == '/':
+ any_prefix_is_slash = True
+ rv.append((len(prefix), opt))
+
+ rv.sort(key=lambda x: x[0])
+
+ rv = ', '.join(x[1] for x in rv)
+ return rv, any_prefix_is_slash
diff --git a/app/lib/click/globals.py b/app/lib/click/globals.py
new file mode 100644
index 0000000..14338e6
--- /dev/null
+++ b/app/lib/click/globals.py
@@ -0,0 +1,48 @@
+from threading import local
+
+
+_local = local()
+
+
+def get_current_context(silent=False):
+ """Returns the current click context. This can be used as a way to
+ access the current context object from anywhere. This is a more implicit
+ alternative to the :func:`pass_context` decorator. This function is
+ primarily useful for helpers such as :func:`echo` which might be
+    interested in changing its behavior based on the current context.
+
+ To push the current context, :meth:`Context.scope` can be used.
+
+ .. versionadded:: 5.0
+
+    :param silent: if set to `True` the return value is `None` if no context
+ is available. The default behavior is to raise a
+ :exc:`RuntimeError`.
+ """
+ try:
+ return getattr(_local, 'stack')[-1]
+ except (AttributeError, IndexError):
+ if not silent:
+ raise RuntimeError('There is no active click context.')
+
+
+def push_context(ctx):
+ """Pushes a new context to the current stack."""
+ _local.__dict__.setdefault('stack', []).append(ctx)
+
+
+def pop_context():
+ """Removes the top level from the stack."""
+ _local.stack.pop()
+
+
+def resolve_color_default(color=None):
+ """"Internal helper to get the default value of the color flag. If a
+ value is passed it's returned unchanged, otherwise it's looked up from
+ the current context.
+ """
+ if color is not None:
+ return color
+ ctx = get_current_context(silent=True)
+ if ctx is not None:
+ return ctx.color
diff --git a/app/lib/click/parser.py b/app/lib/click/parser.py
new file mode 100644
index 0000000..9775c9f
--- /dev/null
+++ b/app/lib/click/parser.py
@@ -0,0 +1,426 @@
+# -*- coding: utf-8 -*-
+"""
+ click.parser
+ ~~~~~~~~~~~~
+
+    This module started out largely as a copy-paste from the stdlib's
+    optparse module, with the features removed that we do not need from
+    optparse because we implement them in Click on a higher level (for
+    instance type handling, help formatting, and a lot more).
+
+ The plan is to remove more and more from here over time.
+
+ The reason this is a different module and not optparse from the stdlib
+    is that 2.x and 3.x differ in the error messages they generate, and
+    optparse in the stdlib uses gettext for no good reason, which might
+    cause us issues.
+"""
+import re
+from collections import deque
+from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
+ BadArgumentUsage
+
+
+def _unpack_args(args, nargs_spec):
+ """Given an iterable of arguments and an iterable of nargs specifications,
+ it returns a tuple with all the unpacked arguments at the first index
+ and all remaining arguments as the second.
+
+ The nargs specification is the number of arguments that should be consumed
+ or `-1` to indicate that this position should eat up all the remainders.
+
+ Missing items are filled with `None`.
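+
+    Illustrative call (example values)::
+
+        _unpack_args(['a', 'b', 'c'], [1, -1])
+        # -> (('a', ('b', 'c')), [])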
+ """
+ args = deque(args)
+ nargs_spec = deque(nargs_spec)
+ rv = []
+ spos = None
+
+ def _fetch(c):
+ try:
+ if spos is None:
+ return c.popleft()
+ else:
+ return c.pop()
+ except IndexError:
+ return None
+
+ while nargs_spec:
+ nargs = _fetch(nargs_spec)
+ if nargs == 1:
+ rv.append(_fetch(args))
+ elif nargs > 1:
+ x = [_fetch(args) for _ in range(nargs)]
+ # If we're reversed, we're pulling in the arguments in reverse,
+ # so we need to turn them around.
+ if spos is not None:
+ x.reverse()
+ rv.append(tuple(x))
+ elif nargs < 0:
+ if spos is not None:
+ raise TypeError('Cannot have two nargs < 0')
+ spos = len(rv)
+ rv.append(None)
+
+ # spos is the position of the wildcard (star). If it's not `None`,
+ # we fill it with the remainder.
+ if spos is not None:
+ rv[spos] = tuple(args)
+ args = []
+ rv[spos + 1:] = reversed(rv[spos + 1:])
+
+ return tuple(rv), list(args)
+
+
+def _error_opt_args(nargs, opt):
+ if nargs == 1:
+ raise BadOptionUsage('%s option requires an argument' % opt)
+ raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs))
+
+
+def split_opt(opt):
+ first = opt[:1]
+ if first.isalnum():
+ return '', opt
+ if opt[1:2] == first:
+ return opt[:2], opt[2:]
+ return first, opt[1:]
+
+
+def normalize_opt(opt, ctx):
+ if ctx is None or ctx.token_normalize_func is None:
+ return opt
+ prefix, opt = split_opt(opt)
+ return prefix + ctx.token_normalize_func(opt)
+
+
+def split_arg_string(string):
+ """Given an argument string this attempts to split it into small parts."""
+ rv = []
+ for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+ r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
+ r'|\S+)\s*', string, re.S):
+ arg = match.group().strip()
+ if arg[:1] == arg[-1:] and arg[:1] in '"\'':
+ arg = arg[1:-1].encode('ascii', 'backslashreplace') \
+ .decode('unicode-escape')
+ try:
+ arg = type(string)(arg)
+ except UnicodeError:
+ pass
+ rv.append(arg)
+ return rv
+
+
+class Option(object):
+
+ def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
+ self._short_opts = []
+ self._long_opts = []
+ self.prefixes = set()
+
+ for opt in opts:
+ prefix, value = split_opt(opt)
+ if not prefix:
+ raise ValueError('Invalid start character for option (%s)'
+ % opt)
+ self.prefixes.add(prefix[0])
+ if len(prefix) == 1 and len(value) == 1:
+ self._short_opts.append(opt)
+ else:
+ self._long_opts.append(opt)
+ self.prefixes.add(prefix)
+
+ if action is None:
+ action = 'store'
+
+ self.dest = dest
+ self.action = action
+ self.nargs = nargs
+ self.const = const
+ self.obj = obj
+
+ @property
+ def takes_value(self):
+ return self.action in ('store', 'append')
+
+ def process(self, value, state):
+ if self.action == 'store':
+ state.opts[self.dest] = value
+ elif self.action == 'store_const':
+ state.opts[self.dest] = self.const
+ elif self.action == 'append':
+ state.opts.setdefault(self.dest, []).append(value)
+ elif self.action == 'append_const':
+ state.opts.setdefault(self.dest, []).append(self.const)
+ elif self.action == 'count':
+ state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
+ else:
+ raise ValueError('unknown action %r' % self.action)
+ state.order.append(self.obj)
+
+
+class Argument(object):
+
+ def __init__(self, dest, nargs=1, obj=None):
+ self.dest = dest
+ self.nargs = nargs
+ self.obj = obj
+
+ def process(self, value, state):
+ if self.nargs > 1:
+ holes = sum(1 for x in value if x is None)
+ if holes == len(value):
+ value = None
+ elif holes != 0:
+ raise BadArgumentUsage('argument %s takes %d values'
+ % (self.dest, self.nargs))
+ state.opts[self.dest] = value
+ state.order.append(self.obj)
+
+
+class ParsingState(object):
+
+ def __init__(self, rargs):
+ self.opts = {}
+ self.largs = []
+ self.rargs = rargs
+ self.order = []
+
+
+class OptionParser(object):
+ """The option parser is an internal class that is ultimately used to
+ parse options and arguments. It's modelled after optparse and brings
+ a similar but vastly simplified API. It should generally not be used
+ directly as the high level Click classes wrap it for you.
+
+ It's not nearly as extensible as optparse or argparse as it does not
+ implement features that are implemented on a higher level (such as
+ types or defaults).
+
+    :param ctx: optionally the :class:`~click.Context` that this parser
+                should be associated with.
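+
+    Illustrative usage (a minimal sketch)::
+
+        parser = OptionParser()
+        parser.add_option(['-f', '--foo'], dest='foo')
+        parser.add_argument('bar')
+        opts, largs, order = parser.parse_args(['-f', 'x', 'y'])
+        # opts == {'foo': 'x', 'bar': 'y'}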
+ """
+
+ def __init__(self, ctx=None):
+ #: The :class:`~click.Context` for this parser. This might be
+ #: `None` for some advanced use cases.
+ self.ctx = ctx
+ #: This controls how the parser deals with interspersed arguments.
+ #: If this is set to `False`, the parser will stop on the first
+ #: non-option. Click uses this to implement nested subcommands
+ #: safely.
+ self.allow_interspersed_args = True
+ #: This tells the parser how to deal with unknown options. By
+ #: default it will error out (which is sensible), but there is a
+ #: second mode where it will ignore it and continue processing
+ #: after shifting all the unknown options into the resulting args.
+ self.ignore_unknown_options = False
+ if ctx is not None:
+ self.allow_interspersed_args = ctx.allow_interspersed_args
+ self.ignore_unknown_options = ctx.ignore_unknown_options
+ self._short_opt = {}
+ self._long_opt = {}
+ self._opt_prefixes = set(['-', '--'])
+ self._args = []
+
+ def add_option(self, opts, dest, action=None, nargs=1, const=None,
+ obj=None):
+ """Adds a new option named `dest` to the parser. The destination
+ is not inferred (unlike with optparse) and needs to be explicitly
+ provided. Action can be any of ``store``, ``store_const``,
+    ``append``, ``append_const`` or ``count``.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ opts = [normalize_opt(opt, self.ctx) for opt in opts]
+ option = Option(opts, dest, action=action, nargs=nargs,
+ const=const, obj=obj)
+ self._opt_prefixes.update(option.prefixes)
+ for opt in option._short_opts:
+ self._short_opt[opt] = option
+ for opt in option._long_opts:
+ self._long_opt[opt] = option
+
+ def add_argument(self, dest, nargs=1, obj=None):
+ """Adds a positional argument named `dest` to the parser.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
+
+ def parse_args(self, args):
+ """Parses positional arguments and returns ``(values, args, order)``
+ for the parsed options and arguments as well as the leftover
+ arguments if there are any. The order is a list of objects as they
+ appear on the command line. If arguments appear multiple times they
+ will be memorized multiple times as well.
+ """
+ state = ParsingState(args)
+ try:
+ self._process_args_for_options(state)
+ self._process_args_for_args(state)
+ except UsageError:
+ if self.ctx is None or not self.ctx.resilient_parsing:
+ raise
+ return state.opts, state.largs, state.order
+
+ def _process_args_for_args(self, state):
+ pargs, args = _unpack_args(state.largs + state.rargs,
+ [x.nargs for x in self._args])
+
+ for idx, arg in enumerate(self._args):
+ arg.process(pargs[idx], state)
+
+ state.largs = args
+ state.rargs = []
+
+ def _process_args_for_options(self, state):
+ while state.rargs:
+ arg = state.rargs.pop(0)
+ arglen = len(arg)
+ # Double dashes always handled explicitly regardless of what
+ # prefixes are valid.
+ if arg == '--':
+ return
+ elif arg[:1] in self._opt_prefixes and arglen > 1:
+ self._process_opts(arg, state)
+ elif self.allow_interspersed_args:
+ state.largs.append(arg)
+ else:
+ state.rargs.insert(0, arg)
+ return
+
+ # Say this is the original argument list:
+ # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+ # ^
+ # (we are about to process arg(i)).
+ #
+ # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+ # [arg0, ..., arg(i-1)] (any options and their arguments will have
+ # been removed from largs).
+ #
+ # The while loop will usually consume 1 or more arguments per pass.
+        # If it consumes 1 (e.g. arg is an option that takes no arguments),
+ # then after _process_arg() is done the situation is:
+ #
+ # largs = subset of [arg0, ..., arg(i)]
+ # rargs = [arg(i+1), ..., arg(N-1)]
+ #
+ # If allow_interspersed_args is false, largs will always be
+ # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+ # not a very interesting subset!
+
+ def _match_long_opt(self, opt, explicit_value, state):
+ if opt not in self._long_opt:
+ possibilities = [word for word in self._long_opt
+ if word.startswith(opt)]
+ raise NoSuchOption(opt, possibilities=possibilities)
+
+ option = self._long_opt[opt]
+ if option.takes_value:
+ # At this point it's safe to modify rargs by injecting the
+ # explicit value, because no exception is raised in this
+ # branch. This means that the inserted value will be fully
+ # consumed.
+ if explicit_value is not None:
+ state.rargs.insert(0, explicit_value)
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ elif explicit_value is not None:
+ raise BadOptionUsage('%s option does not take a value' % opt)
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ def _match_short_opt(self, arg, state):
+ stop = False
+ i = 1
+ prefix = arg[0]
+ unknown_options = []
+
+ for ch in arg[1:]:
+ opt = normalize_opt(prefix + ch, self.ctx)
+ option = self._short_opt.get(opt)
+ i += 1
+
+ if not option:
+ if self.ignore_unknown_options:
+ unknown_options.append(ch)
+ continue
+ raise NoSuchOption(opt)
+ if option.takes_value:
+ # Any characters left in arg? Pretend they're the
+ # next arg, and stop consuming characters of arg.
+ if i < len(arg):
+ state.rargs.insert(0, arg[i:])
+ stop = True
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ if stop:
+ break
+
+        # If we got any unknown options, we recombine the string of the
+        # remaining options, re-attach the prefix, and report that to
+        # the state as a new larg. This way basic combinatorics can
+        # still be achieved while ignoring unknown arguments.
+ if self.ignore_unknown_options and unknown_options:
+ state.largs.append(prefix + ''.join(unknown_options))
+
+ def _process_opts(self, arg, state):
+ explicit_value = None
+ # Long option handling happens in two parts. The first part is
+ # supporting explicitly attached values. In any case, we will try
+ # to long match the option first.
+ if '=' in arg:
+ long_opt, explicit_value = arg.split('=', 1)
+ else:
+ long_opt = arg
+ norm_long_opt = normalize_opt(long_opt, self.ctx)
+
+ # At this point we will match the (assumed) long option through
+ # the long option matching code. Note that this allows options
+ # like "-foo" to be matched as long options.
+ try:
+ self._match_long_opt(norm_long_opt, explicit_value, state)
+ except NoSuchOption:
+ # At this point the long option matching failed, and we need
+ # to try with short options. However there is a special rule
+ # which says, that if we have a two character options prefix
+ # (applies to "--foo" for instance), we do not dispatch to the
+ # short option code and will instead raise the no option
+ # error.
+ if arg[:2] not in self._opt_prefixes:
+ return self._match_short_opt(arg, state)
+ if not self.ignore_unknown_options:
+ raise
+ state.largs.append(arg)
diff --git a/app/lib/click/termui.py b/app/lib/click/termui.py
new file mode 100644
index 0000000..d9fba52
--- /dev/null
+++ b/app/lib/click/termui.py
@@ -0,0 +1,539 @@
+import os
+import sys
+import struct
+
+from ._compat import raw_input, text_type, string_types, \
+ isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
+from .utils import echo
+from .exceptions import Abort, UsageError
+from .types import convert_type
+from .globals import resolve_color_default
+
+
+# The prompt functions to use. The doc tools currently override these
+# functions to customize how they work.
+visible_prompt_func = raw_input
+
+_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta',
+ 'cyan', 'white', 'reset')
+_ansi_reset_all = '\033[0m'
+
+
+def hidden_prompt_func(prompt):
+ import getpass
+ return getpass.getpass(prompt)
+
+
+def _build_prompt(text, suffix, show_default=False, default=None):
+ prompt = text
+ if default is not None and show_default:
+ prompt = '%s [%s]' % (prompt, default)
+ return prompt + suffix
+
+
+def prompt(text, default=None, hide_input=False,
+ confirmation_prompt=False, type=None,
+ value_proc=None, prompt_suffix=': ',
+ show_default=True, err=False):
+ """Prompts a user for input. This is a convenience function that can
+    be used to prompt a user for input.
+
+    If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ .. versionadded:: 6.0
+ Added unicode support for cmd.exe on Windows.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the text to show for the prompt.
+ :param default: the default value to use if no input happens. If this
+ is not given it will prompt until it's aborted.
+ :param hide_input: if this is set to true then the input value will
+ be hidden.
+ :param confirmation_prompt: asks for confirmation for the value.
+ :param type: the type to use to check the value against.
+ :param value_proc: if this parameter is provided it's a function that
+ is invoked instead of the type conversion to
+ convert a value.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
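+
+    Illustrative usage (a minimal sketch)::
+
+        name = prompt('Your name', default='world')
+        age = prompt('Your age', type=int)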
+ """
+ result = None
+
+ def prompt_func(text):
+ f = hide_input and hidden_prompt_func or visible_prompt_func
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(text, nl=False, err=err)
+ return f('')
+ except (KeyboardInterrupt, EOFError):
+ # getpass doesn't print a newline if the user aborts input with ^C.
+ # Allegedly this behavior is inherited from getpass(3).
+ # A doc bug has been filed at https://bugs.python.org/issue24711
+ if hide_input:
+ echo(None, err=err)
+ raise Abort()
+
+ if value_proc is None:
+ value_proc = convert_type(type, default)
+
+ prompt = _build_prompt(text, prompt_suffix, show_default, default)
+
+ while 1:
+ while 1:
+ value = prompt_func(prompt)
+ if value:
+ break
+ # If a default is set and used, then the confirmation
+ # prompt is always skipped because that's the only thing
+ # that really makes sense.
+ elif default is not None:
+ return default
+ try:
+ result = value_proc(value)
+ except UsageError as e:
+ echo('Error: %s' % e.message, err=err)
+ continue
+ if not confirmation_prompt:
+ return result
+ while 1:
+ value2 = prompt_func('Repeat for confirmation: ')
+ if value2:
+ break
+ if value == value2:
+ return result
+ echo('Error: the two entered values do not match', err=err)
+
+
+def confirm(text, default=False, abort=False, prompt_suffix=': ',
+ show_default=True, err=False):
+ """Prompts for confirmation (yes/no question).
+
+    If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the question to ask.
+ :param default: the default for the prompt.
+    :param abort: if this is set to `True` a negative answer aborts the
+                  prompt by raising :exc:`Abort`.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
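+
+    Illustrative usage (a minimal sketch; ``do_the_thing`` is a
+    placeholder)::
+
+        if confirm('Do you want to continue?'):
+            do_the_thing()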
+ """
+ prompt = _build_prompt(text, prompt_suffix, show_default,
+ default and 'Y/n' or 'y/N')
+ while 1:
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(prompt, nl=False, err=err)
+ value = visible_prompt_func('').lower().strip()
+ except (KeyboardInterrupt, EOFError):
+ raise Abort()
+ if value in ('y', 'yes'):
+ rv = True
+ elif value in ('n', 'no'):
+ rv = False
+ elif value == '':
+ rv = default
+ else:
+ echo('Error: invalid input', err=err)
+ continue
+ break
+ if abort and not rv:
+ raise Abort()
+ return rv
+
+
+def get_terminal_size():
+ """Returns the current size of the terminal as tuple in the form
+ ``(width, height)`` in columns and rows.
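+
+    Illustrative usage (a minimal sketch)::
+
+        width, height = get_terminal_size()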
+ """
+ # If shutil has get_terminal_size() (Python 3.3 and later) use that
+ if sys.version_info >= (3, 3):
+ import shutil
+ shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
+ if shutil_get_terminal_size:
+ sz = shutil_get_terminal_size()
+ return sz.columns, sz.lines
+
+ if get_winterm_size is not None:
+ return get_winterm_size()
+
+ def ioctl_gwinsz(fd):
+ try:
+ import fcntl
+ import termios
+ cr = struct.unpack(
+ 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+ except Exception:
+ return
+ return cr
+
+ cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
+ if not cr:
+ try:
+ fd = os.open(os.ctermid(), os.O_RDONLY)
+ try:
+ cr = ioctl_gwinsz(fd)
+ finally:
+ os.close(fd)
+ except Exception:
+ pass
+ if not cr or not cr[0] or not cr[1]:
+ cr = (os.environ.get('LINES', 25),
+ os.environ.get('COLUMNS', DEFAULT_COLUMNS))
+ return int(cr[1]), int(cr[0])
+
+
+def echo_via_pager(text, color=None):
+ """This function takes a text and shows it via an environment specific
+ pager on stdout.
+
+ .. versionchanged:: 3.0
+ Added the `color` flag.
+
+ :param text: the text to page.
+ :param color: controls if the pager supports ANSI colors or not. The
+ default is autodetection.
+ """
+ color = resolve_color_default(color)
+ if not isinstance(text, string_types):
+ text = text_type(text)
+ from ._termui_impl import pager
+ return pager(text + '\n', color)
+
+
+def progressbar(iterable=None, length=None, label=None, show_eta=True,
+ show_percent=None, show_pos=False,
+ item_show_func=None, fill_char='#', empty_char='-',
+ bar_template='%(label)s [%(bar)s] %(info)s',
+ info_sep=' ', width=36, file=None, color=None):
+ """This function creates an iterable context manager that can be used
+ to iterate over something while showing a progress bar. It will
+ either iterate over the `iterable` or `length` items (that are counted
+ up). While iteration happens, this function will print a rendered
+ progress bar to the given `file` (defaults to stdout) and will attempt
+ to calculate remaining time and more. By default, this progress bar
+ will not be rendered if the file is not a terminal.
+
+ The context manager creates the progress bar. When the context
+ manager is entered the progress bar is already displayed. With every
+ iteration over the progress bar, the iterable passed to the bar is
+ advanced and the bar is updated. When the context manager exits,
+ a newline is printed and the progress bar is finalized on screen.
+
+    No other printing may happen while the bar is active, or the
+    progress bar will be unintentionally destroyed.
+
+ Example usage::
+
+ with progressbar(items) as bar:
+ for item in bar:
+ do_something_with(item)
+
+ Alternatively, if no iterable is specified, one can manually update the
+ progress bar through the `update()` method instead of directly
+ iterating over the progress bar. The update method accepts the number
+ of steps to increment the bar with::
+
+ with progressbar(length=chunks.total_bytes) as bar:
+ for chunk in chunks:
+ process_chunk(chunk)
+ bar.update(chunks.bytes)
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+       Added the `color` parameter. Added an `update` method to the
+ progressbar object.
+
+ :param iterable: an iterable to iterate over. If not provided the length
+ is required.
+ :param length: the number of items to iterate over. By default the
+ progressbar will attempt to ask the iterator about its
+ length, which might or might not work. If an iterable is
+ also provided this parameter can be used to override the
+ length. If an iterable is not provided the progress bar
+ will iterate over a range of that length.
+ :param label: the label to show next to the progress bar.
+ :param show_eta: enables or disables the estimated time display. This is
+ automatically disabled if the length cannot be
+ determined.
+ :param show_percent: enables or disables the percentage display. The
+ default is `True` if the iterable has a length or
+ `False` if not.
+ :param show_pos: enables or disables the absolute position display. The
+ default is `False`.
+ :param item_show_func: a function called with the current item which
+ can return a string to show the current item
+ next to the progress bar. Note that the current
+ item can be `None`!
+ :param fill_char: the character to use to show the filled part of the
+ progress bar.
+ :param empty_char: the character to use to show the non-filled part of
+ the progress bar.
+ :param bar_template: the format string to use as template for the bar.
+ The parameters in it are ``label`` for the label,
+ ``bar`` for the progress bar and ``info`` for the
+ info section.
+ :param info_sep: the separator between multiple info items (eta etc.)
+ :param width: the width of the progress bar in characters, 0 means full
+ terminal width
+ :param file: the file to write to. If this is not a terminal then
+ only the label is printed.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are included anywhere in the progress bar output
+ which is not the case by default.
+ """
+ from ._termui_impl import ProgressBar
+ color = resolve_color_default(color)
+ return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
+ show_percent=show_percent, show_pos=show_pos,
+ item_show_func=item_show_func, fill_char=fill_char,
+ empty_char=empty_char, bar_template=bar_template,
+ info_sep=info_sep, file=file, label=label,
+ width=width, color=color)
+
+
+def clear():
+ """Clears the terminal screen. This will have the effect of clearing
+ the whole visible space of the terminal and moving the cursor to the
+ top left. This does not do anything if not connected to a terminal.
+
+ .. versionadded:: 2.0
+ """
+ if not isatty(sys.stdout):
+ return
+ # If we're on Windows and we don't have colorama available, then we
+ # clear the screen by shelling out. Otherwise we can use an escape
+ # sequence.
+ if WIN:
+ os.system('cls')
+ else:
+ sys.stdout.write('\033[2J\033[1;1H')
+
+
+def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
+ blink=None, reverse=None, reset=True):
+ """Styles a text with ANSI styles and returns the new string. By
+ default the styling is self contained which means that at the end
+ of the string a reset code is issued. This can be prevented by
+ passing ``reset=False``.
+
+ Examples::
+
+ click.echo(click.style('Hello World!', fg='green'))
+ click.echo(click.style('ATTENTION!', blink=True))
+ click.echo(click.style('Some things', reverse=True, fg='cyan'))
+
+ Supported color names:
+
+ * ``black`` (might be a gray)
+ * ``red``
+ * ``green``
+ * ``yellow`` (might be an orange)
+ * ``blue``
+ * ``magenta``
+ * ``cyan``
+ * ``white`` (might be light gray)
+ * ``reset`` (reset the color code only)
+
+ .. versionadded:: 2.0
+
+ :param text: the string to style with ansi codes.
+ :param fg: if provided this will become the foreground color.
+ :param bg: if provided this will become the background color.
+ :param bold: if provided this will enable or disable bold mode.
+ :param dim: if provided this will enable or disable dim mode. This is
+ badly supported.
+ :param underline: if provided this will enable or disable underline.
+ :param blink: if provided this will enable or disable blinking.
+ :param reverse: if provided this will enable or disable inverse
+ rendering (foreground becomes background and the
+ other way round).
+ :param reset: by default a reset-all code is added at the end of the
+ string which means that styles do not carry over. This
+ can be disabled to compose styles.
+ """
+ bits = []
+ if fg:
+ try:
+ bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30))
+ except ValueError:
+ raise TypeError('Unknown color %r' % fg)
+ if bg:
+ try:
+ bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40))
+ except ValueError:
+ raise TypeError('Unknown color %r' % bg)
+ if bold is not None:
+ bits.append('\033[%dm' % (1 if bold else 22))
+ if dim is not None:
+ bits.append('\033[%dm' % (2 if dim else 22))
+ if underline is not None:
+ bits.append('\033[%dm' % (4 if underline else 24))
+ if blink is not None:
+ bits.append('\033[%dm' % (5 if blink else 25))
+ if reverse is not None:
+ bits.append('\033[%dm' % (7 if reverse else 27))
+ bits.append(text)
+ if reset:
+ bits.append(_ansi_reset_all)
+ return ''.join(bits)
+
+
+def unstyle(text):
+ """Removes ANSI styling information from a string. Usually it's not
+ necessary to use this function as Click's echo function will
+ automatically remove styling if necessary.
+
+ .. versionadded:: 2.0
+
+ :param text: the text to remove style information from.
+ """
+ return strip_ansi(text)
+
+
+def secho(text, file=None, nl=True, err=False, color=None, **styles):
+ """This function combines :func:`echo` and :func:`style` into one
+ call. As such the following two calls are the same::
+
+ click.secho('Hello World!', fg='green')
+ click.echo(click.style('Hello World!', fg='green'))
+
+ All keyword arguments are forwarded to the underlying functions
+ depending on which one they go with.
+
+ .. versionadded:: 2.0
+ """
+ return echo(style(text, **styles), file=file, nl=nl, err=err, color=color)
+
+
+def edit(text=None, editor=None, env=None, require_save=True,
+ extension='.txt', filename=None):
+ r"""Edits the given text in the defined editor. If an editor is given
+ (should be the full path to the executable but the regular operating
+ system search path is used for finding the executable) it overrides
+ the detected editor. Optionally, some environment variables can be
+ used. If the editor is closed without changes, `None` is returned. In
+ case a file is edited directly the return value is always `None` and
+ `require_save` and `extension` are ignored.
+
+ If the editor cannot be opened a :exc:`UsageError` is raised.
+
+ Note for Windows: to simplify cross-platform usage, the newlines are
+ automatically converted from POSIX to Windows and vice versa. As such,
+ the message here will have ``\n`` as newline markers.
+
+ :param text: the text to edit.
+ :param editor: optionally the editor to use. Defaults to automatic
+ detection.
+ :param env: environment variables to forward to the editor.
+ :param require_save: if this is true, then not saving in the editor
+ will make the return value become `None`.
+ :param extension: the extension to tell the editor about. This defaults
+ to `.txt` but changing this might change syntax
+ highlighting.
+ :param filename: if provided it will edit this file instead of the
+ provided text contents. It will not use a temporary
+ file as an indirection in that case.
+ """
+ from ._termui_impl import Editor
+ editor = Editor(editor=editor, env=env, require_save=require_save,
+ extension=extension)
+ if filename is None:
+ return editor.edit(text)
+ editor.edit_file(filename)
+
+
+def launch(url, wait=False, locate=False):
+ """This function launches the given URL (or filename) in the default
+ viewer application for this file type. If this is an executable, it
+ might launch the executable in a new session. The return value is
+ the exit code of the launched application. Usually, ``0`` indicates
+ success.
+
+ Examples::
+
+ click.launch('http://click.pocoo.org/')
+ click.launch('/my/downloaded/file', locate=True)
+
+ .. versionadded:: 2.0
+
+ :param url: URL or filename of the thing to launch.
+ :param wait: waits for the program to stop.
+ :param locate: if this is set to `True` then instead of launching the
+ application associated with the URL it will attempt to
+ launch a file manager with the file located. This
+ might have weird effects if the URL does not point to
+ the filesystem.
+ """
+ from ._termui_impl import open_url
+ return open_url(url, wait=wait, locate=locate)
+
+
+# If this is provided, getchar() calls into this instead. This is used
+# for unittesting purposes.
+_getchar = None
+
+
+def getchar(echo=False):
+ """Fetches a single character from the terminal and returns it. This
+ will always return a unicode character and under certain rare
+ circumstances this might return more than one character. The
+ situations which more than one character is returned is when for
+ whatever reason multiple characters end up in the terminal buffer or
+ standard input was not actually a terminal.
+
+ Note that this will always read from the terminal, even if something
+ is piped into the standard input.
+
+ .. versionadded:: 2.0
+
+ :param echo: if set to `True`, the character read will also show up on
+ the terminal. The default is to not show it.
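+
+    Illustrative usage (a minimal sketch)::
+
+        echo('Press any key')
+        c = getchar()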
+ """
+ f = _getchar
+ if f is None:
+ from ._termui_impl import getchar as f
+ return f(echo)
+
+
+def pause(info='Press any key to continue ...', err=False):
+ """This command stops execution and waits for the user to press any
+ key to continue. This is similar to the Windows batch "pause"
+ command. If the program is not run through a terminal, this command
+ will instead do nothing.
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param info: the info string to print before pausing.
+    :param err: if set to true the message goes to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ if not isatty(sys.stdin) or not isatty(sys.stdout):
+ return
+ try:
+ if info:
+ echo(info, nl=False, err=err)
+ try:
+ getchar()
+ except (KeyboardInterrupt, EOFError):
+ pass
+ finally:
+ if info:
+ echo(err=err)
diff --git a/app/lib/click/testing.py b/app/lib/click/testing.py
new file mode 100644
index 0000000..4416c77
--- /dev/null
+++ b/app/lib/click/testing.py
@@ -0,0 +1,322 @@
+import os
+import sys
+import shutil
+import tempfile
+import contextlib
+
+from ._compat import iteritems, PY2
+
+
+# If someone wants to vendor click, we want to ensure the
+# correct package is discovered. Ideally we could use a
+# relative import here but unfortunately Python does not
+# support that.
+clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+if PY2:
+ from cStringIO import StringIO
+else:
+ import io
+ from ._compat import _find_binary_reader
+
+
+class EchoingStdin(object):
+
+ def __init__(self, input, output):
+ self._input = input
+ self._output = output
+
+ def __getattr__(self, x):
+ return getattr(self._input, x)
+
+ def _echo(self, rv):
+ self._output.write(rv)
+ return rv
+
+ def read(self, n=-1):
+ return self._echo(self._input.read(n))
+
+ def readline(self, n=-1):
+ return self._echo(self._input.readline(n))
+
+ def readlines(self):
+ return [self._echo(x) for x in self._input.readlines()]
+
+ def __iter__(self):
+ return iter(self._echo(x) for x in self._input)
+
+ def __repr__(self):
+ return repr(self._input)
+
+
+def make_input_stream(input, charset):
+ # Is already an input stream.
+ if hasattr(input, 'read'):
+ if PY2:
+ return input
+ rv = _find_binary_reader(input)
+ if rv is not None:
+ return rv
+ raise TypeError('Could not find binary reader for input stream.')
+
+ if input is None:
+ input = b''
+ elif not isinstance(input, bytes):
+ input = input.encode(charset)
+ if PY2:
+ return StringIO(input)
+ return io.BytesIO(input)
+
+
+class Result(object):
+ """Holds the captured result of an invoked CLI script."""
+
+ def __init__(self, runner, output_bytes, exit_code, exception,
+ exc_info=None):
+ #: The runner that created the result
+ self.runner = runner
+ #: The output as bytes.
+ self.output_bytes = output_bytes
+ #: The exit code as integer.
+ self.exit_code = exit_code
+        #: The exception that happened if one did.
+ self.exception = exception
+ #: The traceback
+ self.exc_info = exc_info
+
+ @property
+ def output(self):
+ """The output as unicode string."""
+ return self.output_bytes.decode(self.runner.charset, 'replace') \
+ .replace('\r\n', '\n')
+
+ def __repr__(self):
+        return '<Result %s>' % (
+ self.exception and repr(self.exception) or 'okay',
+ )
+
+
+class CliRunner(object):
+ """The CLI runner provides functionality to invoke a Click command line
+ script for unittesting purposes in a isolated environment. This only
+ works in single-threaded systems without any concurrency as it changes the
+ global interpreter state.
+
+ :param charset: the character set for the input and output data. This is
+ UTF-8 by default and should not be changed currently as
+                    the reporting to Click only works properly in Python 2.
+ :param env: a dictionary with environment variables for overriding.
+ :param echo_stdin: if this is set to `True`, then reading from stdin writes
+ to stdout. This is useful for showing examples in
+ some circumstances. Note that regular prompts
+ will automatically echo the input.
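+
+    Illustrative usage (a minimal sketch; assumes a ``hello`` command
+    defined elsewhere)::
+
+        runner = CliRunner()
+        result = runner.invoke(hello, ['--name', 'Peter'])
+        assert result.exit_code == 0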
+ """
+
+ def __init__(self, charset=None, env=None, echo_stdin=False):
+ if charset is None:
+ charset = 'utf-8'
+ self.charset = charset
+ self.env = env or {}
+ self.echo_stdin = echo_stdin
+
+ def get_default_prog_name(self, cli):
+ """Given a command object it will return the default program name
+ for it. The default is the `name` attribute or ``"root"`` if not
+ set.
+ """
+ return cli.name or 'root'
+
+ def make_env(self, overrides=None):
+ """Returns the environment overrides for invoking a script."""
+ rv = dict(self.env)
+ if overrides:
+ rv.update(overrides)
+ return rv
+
+ @contextlib.contextmanager
+ def isolation(self, input=None, env=None, color=False):
+ """A context manager that sets up the isolation for invoking of a
+ command line tool. This sets up stdin with the given input data
+ and `os.environ` with the overrides from the given dictionary.
+ This also rebinds some internals in Click to be mocked (like the
+ prompt functionality).
+
+ This is automatically done in the :meth:`invoke` method.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param input: the input stream to put into sys.stdin.
+ :param env: the environment overrides as dictionary.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ input = make_input_stream(input, self.charset)
+
+ old_stdin = sys.stdin
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ old_forced_width = clickpkg.formatting.FORCED_WIDTH
+ clickpkg.formatting.FORCED_WIDTH = 80
+
+ env = self.make_env(env)
+
+ if PY2:
+ sys.stdout = sys.stderr = bytes_output = StringIO()
+ if self.echo_stdin:
+ input = EchoingStdin(input, bytes_output)
+ else:
+ bytes_output = io.BytesIO()
+ if self.echo_stdin:
+ input = EchoingStdin(input, bytes_output)
+ input = io.TextIOWrapper(input, encoding=self.charset)
+ sys.stdout = sys.stderr = io.TextIOWrapper(
+ bytes_output, encoding=self.charset)
+
+ sys.stdin = input
+
+ def visible_input(prompt=None):
+ sys.stdout.write(prompt or '')
+ val = input.readline().rstrip('\r\n')
+ sys.stdout.write(val + '\n')
+ sys.stdout.flush()
+ return val
+
+ def hidden_input(prompt=None):
+ sys.stdout.write((prompt or '') + '\n')
+ sys.stdout.flush()
+ return input.readline().rstrip('\r\n')
+
+ def _getchar(echo):
+ char = sys.stdin.read(1)
+ if echo:
+ sys.stdout.write(char)
+ sys.stdout.flush()
+ return char
+
+        default_color = color
+
+ def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ return not default_color
+ return not color
+
+ old_visible_prompt_func = clickpkg.termui.visible_prompt_func
+ old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
+ old__getchar_func = clickpkg.termui._getchar
+ old_should_strip_ansi = clickpkg.utils.should_strip_ansi
+ clickpkg.termui.visible_prompt_func = visible_input
+ clickpkg.termui.hidden_prompt_func = hidden_input
+ clickpkg.termui._getchar = _getchar
+ clickpkg.utils.should_strip_ansi = should_strip_ansi
+
+ old_env = {}
+ try:
+ for key, value in iteritems(env):
+ old_env[key] = os.environ.get(key)
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ yield bytes_output
+ finally:
+ for key, value in iteritems(old_env):
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+ sys.stdin = old_stdin
+ clickpkg.termui.visible_prompt_func = old_visible_prompt_func
+ clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
+ clickpkg.termui._getchar = old__getchar_func
+ clickpkg.utils.should_strip_ansi = old_should_strip_ansi
+ clickpkg.formatting.FORCED_WIDTH = old_forced_width
+
+ def invoke(self, cli, args=None, input=None, env=None,
+ catch_exceptions=True, color=False, **extra):
+ """Invokes a command in an isolated environment. The arguments are
+ forwarded directly to the command line script, the `extra` keyword
+ arguments are passed to the :meth:`~clickpkg.Command.main` function of
+ the command.
+
+ This returns a :class:`Result` object.
+
+ .. versionadded:: 3.0
+ The ``catch_exceptions`` parameter was added.
+
+ .. versionchanged:: 3.0
+ The result object now has an `exc_info` attribute with the
+ traceback if available.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param cli: the command to invoke
+ :param args: the arguments to invoke
+ :param input: the input data for `sys.stdin`.
+ :param env: the environment overrides.
+ :param catch_exceptions: Whether to catch any other exceptions than
+ ``SystemExit``.
+ :param extra: the keyword arguments to pass to :meth:`main`.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ exc_info = None
+ with self.isolation(input=input, env=env, color=color) as out:
+ exception = None
+ exit_code = 0
+
+ try:
+ cli.main(args=args or (),
+ prog_name=self.get_default_prog_name(cli), **extra)
+ except SystemExit as e:
+ if e.code != 0:
+ exception = e
+
+ exc_info = sys.exc_info()
+
+ exit_code = e.code
+ if not isinstance(exit_code, int):
+ sys.stdout.write(str(exit_code))
+ sys.stdout.write('\n')
+ exit_code = 1
+ except Exception as e:
+ if not catch_exceptions:
+ raise
+ exception = e
+ exit_code = -1
+ exc_info = sys.exc_info()
+ finally:
+ sys.stdout.flush()
+ output = out.getvalue()
+
+ return Result(runner=self,
+ output_bytes=output,
+ exit_code=exit_code,
+ exception=exception,
+ exc_info=exc_info)
+
+ @contextlib.contextmanager
+ def isolated_filesystem(self):
+ """A context manager that creates a temporary folder and changes
+ the current working directory to it for isolated filesystem tests.
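+
+        Illustrative usage (a minimal sketch)::
+
+            with runner.isolated_filesystem():
+                with open('hello.txt', 'w') as f:
+                    f.write('Hello World!')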
+ """
+ cwd = os.getcwd()
+ t = tempfile.mkdtemp()
+ os.chdir(t)
+ try:
+ yield t
+ finally:
+ os.chdir(cwd)
+ try:
+ shutil.rmtree(t)
+ except (OSError, IOError):
+ pass
diff --git a/app/lib/click/types.py b/app/lib/click/types.py
new file mode 100644
index 0000000..3639002
--- /dev/null
+++ b/app/lib/click/types.py
@@ -0,0 +1,550 @@
+import os
+import stat
+
+from ._compat import open_stream, text_type, filename_to_ui, \
+ get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2
+from .exceptions import BadParameter
+from .utils import safecall, LazyFile
+
+
+class ParamType(object):
+ """Helper for converting values through types. The following is
+ necessary for a valid type:
+
+ * it needs a name
+ * it needs to pass through None unchanged
+ * it needs to convert from a string
+    * it needs to pass its own result type through unchanged
+      (e.g. it needs to be idempotent)
+ * it needs to be able to deal with param and context being `None`.
+ This can be the case when the object is used with prompt
+ inputs.
+ """
+ is_composite = False
+
+ #: the descriptive name of this type
+ name = None
+
+ #: if a list of this type is expected and the value is pulled from a
+ #: string environment variable, this is what splits it up. `None`
+ #: means any whitespace. For all parameters the general rule is that
+ #: whitespace splits them up. The exception are paths and files which
+ #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
+ #: Windows).
+ envvar_list_splitter = None
+
+ def __call__(self, value, param=None, ctx=None):
+ if value is not None:
+ return self.convert(value, param, ctx)
+
+ def get_metavar(self, param):
+ """Returns the metavar default for this param if it provides one."""
+
+ def get_missing_message(self, param):
+ """Optionally might return extra information about a missing
+ parameter.
+
+ .. versionadded:: 2.0
+ """
+
+ def convert(self, value, param, ctx):
+ """Converts the value. This is not invoked for values that are
+ `None` (the missing value).
+ """
+ return value
+
+ def split_envvar_value(self, rv):
+ """Given a value from an environment variable this splits it up
+ into small chunks depending on the defined envvar list splitter.
+
+ If the splitter is set to `None`, which means that whitespace splits,
+ then leading and trailing whitespace is ignored. Otherwise, leading
+ and trailing splitters usually lead to empty items being included.
+ """
+ return (rv or '').split(self.envvar_list_splitter)
+
+ def fail(self, message, param=None, ctx=None):
+ """Helper method to fail with an invalid value message."""
+ raise BadParameter(message, ctx=ctx, param=param)
+
+
+class CompositeParamType(ParamType):
+ is_composite = True
+
+ @property
+ def arity(self):
+ raise NotImplementedError()
+
+
+class FuncParamType(ParamType):
+
+ def __init__(self, func):
+ self.name = func.__name__
+ self.func = func
+
+ def convert(self, value, param, ctx):
+ try:
+ return self.func(value)
+ except ValueError:
+ try:
+ value = text_type(value)
+ except UnicodeError:
+ value = str(value).decode('utf-8', 'replace')
+ self.fail(value, param, ctx)
+
+
+class UnprocessedParamType(ParamType):
+ name = 'text'
+
+ def convert(self, value, param, ctx):
+ return value
+
+ def __repr__(self):
+ return 'UNPROCESSED'
+
+
+class StringParamType(ParamType):
+ name = 'text'
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bytes):
+ enc = _get_argv_encoding()
+ try:
+ value = value.decode(enc)
+ except UnicodeError:
+ fs_enc = get_filesystem_encoding()
+ if fs_enc != enc:
+ try:
+ value = value.decode(fs_enc)
+ except UnicodeError:
+ value = value.decode('utf-8', 'replace')
+ return value
+ return value
+
+ def __repr__(self):
+ return 'STRING'
+
+
+class Choice(ParamType):
+ """The choice type allows a value to be checked against a fixed set of
+ supported values. All of these values have to be strings.
+
+ See :ref:`choice-opts` for an example.
+ """
+ name = 'choice'
+
+ def __init__(self, choices):
+ self.choices = choices
+
+ def get_metavar(self, param):
+ return '[%s]' % '|'.join(self.choices)
+
+ def get_missing_message(self, param):
+ return 'Choose from %s.' % ', '.join(self.choices)
+
+ def convert(self, value, param, ctx):
+ # Exact match
+ if value in self.choices:
+ return value
+
+ # Match through normalization
+ if ctx is not None and \
+ ctx.token_normalize_func is not None:
+ value = ctx.token_normalize_func(value)
+ for choice in self.choices:
+ if ctx.token_normalize_func(choice) == value:
+ return choice
+
+ self.fail('invalid choice: %s. (choose from %s)' %
+ (value, ', '.join(self.choices)), param, ctx)
+
+ def __repr__(self):
+ return 'Choice(%r)' % list(self.choices)
+
+
+class IntParamType(ParamType):
+ name = 'integer'
+
+ def convert(self, value, param, ctx):
+ try:
+ return int(value)
+ except (ValueError, UnicodeError):
+ self.fail('%s is not a valid integer' % value, param, ctx)
+
+ def __repr__(self):
+ return 'INT'
+
+
+class IntRange(IntParamType):
+ """A parameter that works similar to :data:`click.INT` but restricts
+ the value to fit into a range. The default behavior is to fail if the
+ value falls outside the range, but it can also be silently clamped
+ between the two edges.
+
+ See :ref:`ranges` for an example.
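+
+    Illustrative behavior (a sketch)::
+
+        IntRange(0, 10).convert('5', None, None)                # -> 5
+        IntRange(0, 10, clamp=True).convert('99', None, None)   # -> 10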
+ """
+ name = 'integer range'
+
+ def __init__(self, min=None, max=None, clamp=False):
+ self.min = min
+ self.max = max
+ self.clamp = clamp
+
+ def convert(self, value, param, ctx):
+ rv = IntParamType.convert(self, value, param, ctx)
+ if self.clamp:
+ if self.min is not None and rv < self.min:
+ return self.min
+ if self.max is not None and rv > self.max:
+ return self.max
+ if self.min is not None and rv < self.min or \
+ self.max is not None and rv > self.max:
+ if self.min is None:
+ self.fail('%s is bigger than the maximum valid value '
+ '%s.' % (rv, self.max), param, ctx)
+ elif self.max is None:
+ self.fail('%s is smaller than the minimum valid value '
+ '%s.' % (rv, self.min), param, ctx)
+ else:
+ self.fail('%s is not in the valid range of %s to %s.'
+ % (rv, self.min, self.max), param, ctx)
+ return rv
+
+ def __repr__(self):
+ return 'IntRange(%r, %r)' % (self.min, self.max)
+
+
+class BoolParamType(ParamType):
+ name = 'boolean'
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bool):
+ return bool(value)
+ value = value.lower()
+ if value in ('true', '1', 'yes', 'y'):
+ return True
+ elif value in ('false', '0', 'no', 'n'):
+ return False
+ self.fail('%s is not a valid boolean' % value, param, ctx)
+
+ def __repr__(self):
+ return 'BOOL'
+
+
+class FloatParamType(ParamType):
+ name = 'float'
+
+ def convert(self, value, param, ctx):
+ try:
+ return float(value)
+ except (UnicodeError, ValueError):
+ self.fail('%s is not a valid floating point value' %
+ value, param, ctx)
+
+ def __repr__(self):
+ return 'FLOAT'
+
+
+class UUIDParameterType(ParamType):
+ name = 'uuid'
+
+ def convert(self, value, param, ctx):
+ import uuid
+ try:
+ if PY2 and isinstance(value, text_type):
+ value = value.encode('ascii')
+ return uuid.UUID(value)
+ except (UnicodeError, ValueError):
+ self.fail('%s is not a valid UUID value' % value, param, ctx)
+
+ def __repr__(self):
+ return 'UUID'
+
+
+class File(ParamType):
+ """Declares a parameter to be a file for reading or writing. The file
+ is automatically closed once the context tears down (after the command
+ finished working).
+
+ Files can be opened for reading or writing. The special value ``-``
+ indicates stdin or stdout depending on the mode.
+
+ By default, the file is opened for reading text data, but it can also be
+ opened in binary mode or for writing. The encoding parameter can be used
+ to force a specific encoding.
+
+ The `lazy` flag controls if the file should be opened immediately or
+    upon first IO. The default is to be non-lazy for standard input and
+ output streams as well as files opened for reading, lazy otherwise.
+
+ Starting with Click 2.0, files can also be opened atomically in which
+ case all writes go into a separate file in the same folder and upon
+ completion the file will be moved over to the original location. This
+ is useful if a file regularly read by other users is modified.
+
+ See :ref:`file-args` for more information.
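+
+    Illustrative usage (a minimal sketch, as it would appear in user
+    code)::
+
+        @click.command()
+        @click.argument('src', type=click.File('rb'))
+        def process(src):
+            data = src.read()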
+ """
+ name = 'filename'
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(self, mode='r', encoding=None, errors='strict', lazy=None,
+ atomic=False):
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.lazy = lazy
+ self.atomic = atomic
+
+ def resolve_lazy_flag(self, value):
+ if self.lazy is not None:
+ return self.lazy
+ if value == '-':
+ return False
+ elif 'w' in self.mode:
+ return True
+ return False
+
+ def convert(self, value, param, ctx):
+ try:
+ if hasattr(value, 'read') or hasattr(value, 'write'):
+ return value
+
+ lazy = self.resolve_lazy_flag(value)
+
+ if lazy:
+ f = LazyFile(value, self.mode, self.encoding, self.errors,
+ atomic=self.atomic)
+ if ctx is not None:
+ ctx.call_on_close(f.close_intelligently)
+ return f
+
+ f, should_close = open_stream(value, self.mode,
+ self.encoding, self.errors,
+ atomic=self.atomic)
+ # If a context is provided, we automatically close the file
+ # at the end of the context execution (or flush out). If a
+ # context does not exist, it's the caller's responsibility to
+ # properly close the file. This for instance happens when the
+ # type is used with prompts.
+ if ctx is not None:
+ if should_close:
+ ctx.call_on_close(safecall(f.close))
+ else:
+ ctx.call_on_close(safecall(f.flush))
+ return f
+ except (IOError, OSError) as e:
+ self.fail('Could not open file: %s: %s' % (
+ filename_to_ui(value),
+ get_streerror(e),
+ ), param, ctx)
+
+
+class Path(ParamType):
+ """The path type is similar to the :class:`File` type but it performs
+ different checks. First of all, instead of returning an open file
+ handle it returns just the filename. Secondly, it can perform various
+ basic checks about what the file or directory should be.
+
+ .. versionchanged:: 6.0
+ `allow_dash` was added.
+
+ :param exists: if set to true, the file or directory needs to exist for
+ this value to be valid. If this is not required and a
+ file does indeed not exist, then all further checks are
+ silently skipped.
+ :param file_okay: controls if a file is a possible value.
+ :param dir_okay: controls if a directory is a possible value.
+ :param writable: if true, a writable check is performed.
+ :param readable: if true, a readable check is performed.
+ :param resolve_path: if this is true, then the path is fully resolved
+ before the value is passed onwards. This means
+ that it's absolute and symlinks are resolved.
+ :param allow_dash: If this is set to `True`, a single dash to indicate
+ standard streams is permitted.
+    :param path_type: optionally a string type that should be used to
+ represent the path. The default is `None` which
+ means the return value will be either bytes or
+ unicode depending on what makes most sense given the
+ input data Click deals with.
+ """
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(self, exists=False, file_okay=True, dir_okay=True,
+ writable=False, readable=True, resolve_path=False,
+ allow_dash=False, path_type=None):
+ self.exists = exists
+ self.file_okay = file_okay
+ self.dir_okay = dir_okay
+ self.writable = writable
+ self.readable = readable
+ self.resolve_path = resolve_path
+ self.allow_dash = allow_dash
+ self.type = path_type
+
+ if self.file_okay and not self.dir_okay:
+ self.name = 'file'
+ self.path_type = 'File'
+        elif self.dir_okay and not self.file_okay:
+ self.name = 'directory'
+ self.path_type = 'Directory'
+ else:
+ self.name = 'path'
+ self.path_type = 'Path'
+
+ def coerce_path_result(self, rv):
+ if self.type is not None and not isinstance(rv, self.type):
+ if self.type is text_type:
+ rv = rv.decode(get_filesystem_encoding())
+ else:
+ rv = rv.encode(get_filesystem_encoding())
+ return rv
+
+ def convert(self, value, param, ctx):
+ rv = value
+
+ is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-')
+
+ if not is_dash:
+ if self.resolve_path:
+ rv = os.path.realpath(rv)
+
+ try:
+ st = os.stat(rv)
+ except OSError:
+ if not self.exists:
+ return self.coerce_path_result(rv)
+ self.fail('%s "%s" does not exist.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+
+ if not self.file_okay and stat.S_ISREG(st.st_mode):
+ self.fail('%s "%s" is a file.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+ if not self.dir_okay and stat.S_ISDIR(st.st_mode):
+ self.fail('%s "%s" is a directory.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+ if self.writable and not os.access(value, os.W_OK):
+ self.fail('%s "%s" is not writable.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+ if self.readable and not os.access(value, os.R_OK):
+ self.fail('%s "%s" is not readable.' % (
+ self.path_type,
+ filename_to_ui(value)
+ ), param, ctx)
+
+ return self.coerce_path_result(rv)
+
+
+class Tuple(CompositeParamType):
+ """The default behavior of Click is to apply a type on a value directly.
+ This works well in most cases, except for when `nargs` is set to a fixed
+ count and different types should be used for different items. In this
+ case the :class:`Tuple` type can be used. This type can only be used
+ if `nargs` is set to a fixed number.
+
+ For more information see :ref:`tuple-type`.
+
+ This can be selected by using a Python tuple literal as a type.
+
+ :param types: a list of types that should be used for the tuple items.
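+
+    Illustrative usage (a minimal sketch; the tuple literal is the
+    usual spelling in user code)::
+
+        @click.command()
+        @click.option('--item', nargs=2, type=(str, int))
+        def put(item):
+            name, number = item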
+ """
+
+ def __init__(self, types):
+ self.types = [convert_type(ty) for ty in types]
+
+ @property
+ def name(self):
+ return "<" + " ".join(ty.name for ty in self.types) + ">"
+
+ @property
+ def arity(self):
+ return len(self.types)
+
+ def convert(self, value, param, ctx):
+ if len(value) != len(self.types):
+ raise TypeError('It would appear that nargs is set to conflict '
+ 'with the composite type arity.')
+ return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
+
+
+def convert_type(ty, default=None):
+ """Converts a callable or python ty into the most appropriate param
+ ty.
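+
+    Illustrative behavior (a sketch)::
+
+        convert_type(int)                  # -> INT
+        convert_type(None, default=1.5)    # -> FLOAT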
+ """
+ guessed_type = False
+ if ty is None and default is not None:
+ if isinstance(default, tuple):
+ ty = tuple(map(type, default))
+ else:
+ ty = type(default)
+ guessed_type = True
+
+ if isinstance(ty, tuple):
+ return Tuple(ty)
+ if isinstance(ty, ParamType):
+ return ty
+ if ty is text_type or ty is str or ty is None:
+ return STRING
+ if ty is int:
+ return INT
+ # Booleans are only okay if not guessed. This is done because for
+ # flags the default value is actually a bit of a lie in that it
+ # indicates which of the flags is the one we want. See get_default()
+ # for more information.
+ if ty is bool and not guessed_type:
+ return BOOL
+ if ty is float:
+ return FLOAT
+ if guessed_type:
+ return STRING
+
+ # Catch a common mistake
+ if __debug__:
+ try:
+ if issubclass(ty, ParamType):
+ raise AssertionError('Attempted to use an uninstantiated '
+ 'parameter type (%s).' % ty)
+ except TypeError:
+ pass
+ return FuncParamType(ty)
+
+
+#: A dummy parameter type that just does nothing. From a user's
+#: perspective this appears to just be the same as `STRING` but internally
+#: no string conversion takes place. This is necessary to achieve the
+#: same bytes/unicode behavior on Python 2/3 in situations where you want
+#: to not convert argument types. This is usually useful when working
+#: with file paths as they can appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful which is why
+#: it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default. This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter. This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter. This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter. This is the default for boolean flags. This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType()
diff --git a/app/lib/click/utils.py b/app/lib/click/utils.py
new file mode 100644
index 0000000..eee626d
--- /dev/null
+++ b/app/lib/click/utils.py
@@ -0,0 +1,415 @@
+import os
+import sys
+
+from .globals import resolve_color_default
+
+from ._compat import text_type, open_stream, get_filesystem_encoding, \
+ get_streerror, string_types, PY2, binary_streams, text_streams, \
+ filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
+ _default_text_stdout, _default_text_stderr, is_bytes, WIN
+
+if not PY2:
+ from ._compat import _find_binary_writer
+elif WIN:
+ from ._winconsole import _get_windows_argv, \
+ _hash_py_argv, _initial_argv_hash
+
+
+echo_native_types = string_types + (bytes, bytearray)
+
+
+def _posixify(name):
+ return '-'.join(name.split()).lower()
+
+
+def safecall(func):
+ """Wraps a function so that it swallows exceptions."""
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ pass
+ return wrapper
+
+
+def make_str(value):
+ """Converts a value into a valid string."""
+ if isinstance(value, bytes):
+ try:
+ return value.decode(get_filesystem_encoding())
+ except UnicodeError:
+ return value.decode('utf-8', 'replace')
+ return text_type(value)
+
+
+def make_default_short_help(help, max_length=45):
+ words = help.split()
+ total_length = 0
+ result = []
+ done = False
+
+ for word in words:
+ if word[-1:] == '.':
+ done = True
+ new_length = result and 1 + len(word) or len(word)
+ if total_length + new_length > max_length:
+ result.append('...')
+ done = True
+ else:
+ if result:
+ result.append(' ')
+ result.append(word)
+ if done:
+ break
+ total_length += new_length
+
+ return ''.join(result)
+
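A quick illustration of the truncation behavior above (a sketch; the input string is made up): words are accumulated until the 45-character budget would be exceeded, then an ellipsis is appended.

    make_default_short_help("Synchronizes the local cache with the remote "
                            "server and overwrites existing entries.")
    # -> 'Synchronizes the local cache with the remote...'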
+
+class LazyFile(object):
+ """A lazy file works like a regular file but it does not fully open
+ the file but it does perform some basic checks early to see if the
+ filename parameter does make sense. This is useful for safely opening
+ files for writing.
+ """
+
+ def __init__(self, filename, mode='r', encoding=None, errors='strict',
+ atomic=False):
+ self.name = filename
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.atomic = atomic
+
+ if filename == '-':
+ self._f, self.should_close = open_stream(filename, mode,
+ encoding, errors)
+ else:
+ if 'r' in mode:
+ # Open and close the file in case we're opening it for
+ # reading so that we can catch at least some errors in
+ # some cases early.
+ open(filename, mode).close()
+ self._f = None
+ self.should_close = True
+
+ def __getattr__(self, name):
+ return getattr(self.open(), name)
+
+ def __repr__(self):
+ if self._f is not None:
+ return repr(self._f)
+ return '<unopened file %r %s>' % (self.name, self.mode)
+
+ def open(self):
+ """Opens the file if it's not yet open. This call might fail with
+ a :exc:`FileError`. Not handling this error will produce an error
+ that Click shows.
+ """
+ if self._f is not None:
+ return self._f
+ try:
+ rv, self.should_close = open_stream(self.name, self.mode,
+ self.encoding,
+ self.errors,
+ atomic=self.atomic)
+ except (IOError, OSError) as e:
+ from .exceptions import FileError
+ raise FileError(self.name, hint=get_streerror(e))
+ self._f = rv
+ return rv
+
+ def close(self):
+ """Closes the underlying file, no matter what."""
+ if self._f is not None:
+ self._f.close()
+
+ def close_intelligently(self):
+ """This function only closes the file if it was opened by the lazy
+ file wrapper. For instance this will never close stdin.
+ """
+ if self.should_close:
+ self.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close_intelligently()
+
+ def __iter__(self):
+ self.open()
+ return iter(self._f)
+
+
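A minimal usage sketch for LazyFile (illustrative; 'report.txt' is a made-up path). For write modes nothing is opened up front; the first attribute access triggers open(), and close_intelligently() only closes streams the wrapper itself opened.

    lf = LazyFile('report.txt', mode='w')   # not opened yet
    lf.write('hello\n')                     # __getattr__ -> open() -> write
    lf.close_intelligently()                # closes: the wrapper opened it

    out = LazyFile('-', mode='w')           # '-' resolves to stdout
    out.write('to stdout\n')
    out.close_intelligently()               # no-op: should_close is False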
+class KeepOpenFile(object):
+
+ def __init__(self, file):
+ self._file = file
+
+ def __getattr__(self, name):
+ return getattr(self._file, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ pass
+
+ def __repr__(self):
+ return repr(self._file)
+
+ def __iter__(self):
+ return iter(self._file)
+
+
+def echo(message=None, file=None, nl=True, err=False, color=None):
+ """Prints a message plus a newline to the given file or stdout. On
+ first sight, this looks like the print function, but it has improved
+ support for handling Unicode and binary data that does not fail no
+ matter how badly configured the system is.
+
+ Primarily it means that you can print binary data as well as Unicode
+ data on both 2.x and 3.x to the given file in the most appropriate way
+ possible. This is a very carefree function in that it will try its
+ best not to fail. As of Click 6.0 this includes support for unicode
+ output on the Windows console.
+
+ In addition to that, if `colorama`_ is installed, the echo function will
+ also support clever handling of ANSI codes. Essentially it will then
+ do the following:
+
+ - add transparent handling of ANSI color codes on Windows.
+ - hide ANSI codes automatically if the destination file is not a
+ terminal.
+
+ .. _colorama: http://pypi.python.org/pypi/colorama
+
+ .. versionchanged:: 6.0
+ As of Click 6.0 the echo function will properly support unicode
+ output on the Windows console. Note that click does not modify
+ the interpreter in any way which means that `sys.stdout` or the
+ print statement or function will still not provide unicode support.
+
+ .. versionchanged:: 2.0
+ Starting with version 2.0 of Click, the echo function will work
+ with colorama if it's installed.
+
+ .. versionadded:: 3.0
+ The `err` parameter was added.
+
+ .. versionchanged:: 4.0
+ Added the `color` flag.
+
+ :param message: the message to print
+ :param file: the file to write to (defaults to ``stdout``)
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``. This is faster and easier than calling
+ :func:`get_text_stderr` yourself.
+ :param nl: if set to `True` (the default) a newline is printed afterwards.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection.
+ """
+ if file is None:
+ if err:
+ file = _default_text_stderr()
+ else:
+ file = _default_text_stdout()
+
+ # Convert non bytes/text into the native string type.
+ if message is not None and not isinstance(message, echo_native_types):
+ message = text_type(message)
+
+ if nl:
+ message = message or u''
+ if isinstance(message, text_type):
+ message += u'\n'
+ else:
+ message += b'\n'
+
+ # If there is a message, and we're in Python 3, and the value looks
+ # like bytes, we manually need to find the binary stream and write the
+ # message in there. This is done separately so that most stream
+ # types will work as you would expect. Eg: you can write to StringIO
+ # for other cases.
+ if message and not PY2 and is_bytes(message):
+ binary_file = _find_binary_writer(file)
+ if binary_file is not None:
+ file.flush()
+ binary_file.write(message)
+ binary_file.flush()
+ return
+
+ # ANSI-style support. If there is no message or we are dealing with
+ # bytes nothing is happening. If we are connected to a file we want
+ # to strip colors. If we are on windows we either wrap the stream
+ # to strip the color or we use the colorama support to translate the
+ # ansi codes to API calls.
+ if message and not is_bytes(message):
+ color = resolve_color_default(color)
+ if should_strip_ansi(file, color):
+ message = strip_ansi(message)
+ elif WIN:
+ if auto_wrap_for_ansi is not None:
+ file = auto_wrap_for_ansi(file)
+ elif not color:
+ message = strip_ansi(message)
+
+ if message:
+ file.write(message)
+ file.flush()
+
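A short sketch of typical echo() usage (assumes the public click package, where these helpers are re-exported):

    import click

    click.echo('plain text')                      # stdout plus newline
    click.echo('something failed', err=True)      # goes to stderr
    click.echo(b'\xffraw bytes')                  # binary-safe on Python 3
    click.echo(click.style('warn', fg='red'))     # ANSI stripped when piped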
+
+def get_binary_stream(name):
+ """Returns a system stream for byte processing. This essentially
+ returns the stream from the sys module with the given name but it
+ solves some compatibility issues between different Python versions.
+ Primarily this function is necessary for getting binary streams on
+ Python 3.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ """
+ opener = binary_streams.get(name)
+ if opener is None:
+ raise TypeError('Unknown standard stream %r' % name)
+ return opener()
+
+
+def get_text_stream(name, encoding=None, errors='strict'):
+ """Returns a system stream for text processing. This usually returns
+ a wrapped stream around a binary stream returned from
+ :func:`get_binary_stream` but it also can take shortcuts on Python 3
+ for already correctly configured streams.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ :param encoding: overrides the detected default encoding.
+ :param errors: overrides the default error mode.
+ """
+ opener = text_streams.get(name)
+ if opener is None:
+ raise TypeError('Unknown standard stream %r' % name)
+ return opener(encoding, errors)
+
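Sketch of the stream getters in use; they spare callers from branching on the Python version to obtain byte or text views of the standard streams:

    from click.utils import get_binary_stream, get_text_stream

    out = get_binary_stream('stdout')
    out.write(b'raw bytes\n')

    inp = get_text_stream('stdin', encoding='utf-8', errors='replace')
    # line = inp.readline()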
+
+def open_file(filename, mode='r', encoding=None, errors='strict',
+ lazy=False, atomic=False):
+ """This is similar to how the :class:`File` works but for manual
+ usage. Files are opened non lazy by default. This can open regular
+ files as well as stdin/stdout if ``'-'`` is passed.
+
+ If stdin/stdout is returned the stream is wrapped so that the context
+ manager will not close the stream accidentally. This makes it possible
+ to always use the function like this without having to worry to
+ accidentally close a standard stream::
+
+ with open_file(filename) as f:
+ ...
+
+ .. versionadded:: 3.0
+
+ :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
+ :param mode: the mode in which to open the file.
+ :param encoding: the encoding to use.
+ :param errors: the error handling for this file.
+ :param lazy: can be flipped to true to open the file lazily.
+ :param atomic: in atomic mode writes go into a temporary file and it's
+ moved on close.
+ """
+ if lazy:
+ return LazyFile(filename, mode, encoding, errors, atomic=atomic)
+ f, should_close = open_stream(filename, mode, encoding, errors,
+ atomic=atomic)
+ if not should_close:
+ f = KeepOpenFile(f)
+ return f
+
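Sketch of both paths through open_file(); with '-' the returned stream is a KeepOpenFile, so the with-block cannot accidentally close stdout ('notes.txt' is a made-up filename):

    from click.utils import open_file

    with open_file('notes.txt', mode='w', encoding='utf-8') as f:
        f.write('saved\n')

    with open_file('-', mode='w') as out:   # stdout, wrapped
        out.write('still open afterwards\n')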
+
+def get_os_args():
+ """This returns the argument part of sys.argv in the most appropriate
+ form for processing. What this means is that this return value is in
+ a format that works for Click to process but does not necessarily
+ correspond well to what's actually standard for the interpreter.
+
+ On most environments the return value is ``sys.argv[1:]`` unchanged.
+ However if you are on Windows and running Python 2 the return value
+ will actually be a list of unicode strings instead because the
+ default behavior on that platform otherwise will not be able to
+ carry all possible values that sys.argv can have.
+
+ .. versionadded:: 6.0
+ """
+ # We can only extract the unicode argv if sys.argv has not been
+ # changed since the startup of the application.
+ if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
+ return _get_windows_argv()
+ return sys.argv[1:]
+
+
+def format_filename(filename, shorten=False):
+ """Formats a filename for user display. The main purpose of this
+ function is to ensure that the filename can be displayed at all. This
+ will decode the filename to unicode if necessary in a way that it will
+ not fail. Optionally, it can shorten the filename to not include the
+ full path to the filename.
+
+ :param filename: formats a filename for UI display. This will also convert
+ the filename into unicode without failing.
+ :param shorten: this optionally shortens the filename to strip off the
+ path that leads up to it.
+ """
+ if shorten:
+ filename = os.path.basename(filename)
+ return filename_to_ui(filename)
+
+
+def get_app_dir(app_name, roaming=True, force_posix=False):
+ r"""Returns the config folder for the application. The default behavior
+ is to return whatever is most appropriate for the operating system.
+
+ To give you an idea, for an app called ``"Foo Bar"``, something like
+ the following folders could be returned:
+
+ Mac OS X:
+ ``~/Library/Application Support/Foo Bar``
+ Mac OS X (POSIX):
+ ``~/.foo-bar``
+ Unix:
+ ``~/.config/foo-bar``
+ Unix (POSIX):
+ ``~/.foo-bar``
+ Win XP (roaming):
+ ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
+ Win XP (not roaming):
+ ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
+ Win 7 (roaming):
+ ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+ Win 7 (not roaming):
+ ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+ .. versionadded:: 2.0
+
+ :param app_name: the application name. This should be properly capitalized
+ and can contain whitespace.
+ :param roaming: controls if the folder should be roaming or not on Windows.
+ Has no effect otherwise.
+ :param force_posix: if this is set to `True` then on any POSIX system the
+ folder will be stored in the home folder with a leading
+ dot instead of the XDG config home or darwin's
+ application support folder.
+ """
+ if WIN:
+ key = roaming and 'APPDATA' or 'LOCALAPPDATA'
+ folder = os.environ.get(key)
+ if folder is None:
+ folder = os.path.expanduser('~')
+ return os.path.join(folder, app_name)
+ if force_posix:
+ return os.path.join(os.path.expanduser('~/.' + _posixify(app_name)))
+ if sys.platform == 'darwin':
+ return os.path.join(os.path.expanduser(
+ '~/Library/Application Support'), app_name)
+ return os.path.join(
+ os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
+ _posixify(app_name))
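Usage sketch; the results follow the per-platform table in the docstring:

    from click.utils import get_app_dir

    get_app_dir('Foo Bar')                    # e.g. ~/.config/foo-bar on Linux
    get_app_dir('Foo Bar', force_posix=True)  # ~/.foo-bar on any POSIX system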
diff --git a/app/lib/dateutil/__init__.py b/app/lib/dateutil/__init__.py
new file mode 100644
index 0000000..ba89aa7
--- /dev/null
+++ b/app/lib/dateutil/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+__version__ = "2.6.0"
diff --git a/app/lib/dateutil/_common.py b/app/lib/dateutil/_common.py
new file mode 100644
index 0000000..cd2a338
--- /dev/null
+++ b/app/lib/dateutil/_common.py
@@ -0,0 +1,33 @@
+"""
+Common code used in multiple modules.
+"""
+
+class weekday(object):
+ __slots__ = ["weekday", "n"]
+
+ def __init__(self, weekday, n=None):
+ self.weekday = weekday
+ self.n = n
+
+ def __call__(self, n):
+ if n == self.n:
+ return self
+ else:
+ return self.__class__(self.weekday, n)
+
+ def __eq__(self, other):
+ try:
+ if self.weekday != other.weekday or self.n != other.n:
+ return False
+ except AttributeError:
+ return False
+ return True
+
+ __hash__ = None
+
+ def __repr__(self):
+ s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
+ if not self.n:
+ return s
+ else:
+ return "%s(%+d)" % (s, self.n)
diff --git a/app/lib/dateutil/easter.py b/app/lib/dateutil/easter.py
new file mode 100644
index 0000000..e4def97
--- /dev/null
+++ b/app/lib/dateutil/easter.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a generic easter computing method for any given year, using
+Western, Orthodox or Julian algorithms.
+"""
+
+import datetime
+
+__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
+
+EASTER_JULIAN = 1
+EASTER_ORTHODOX = 2
+EASTER_WESTERN = 3
+
+
+def easter(year, method=EASTER_WESTERN):
+ """
+ This method was ported from the work done by GM Arts,
+ on top of the algorithm by Claus Tondering, which was
+ based in part on the algorithm of Oudin (1940), as
+ quoted in "Explanatory Supplement to the Astronomical
+ Almanac", P. Kenneth Seidelmann, editor.
+
+ This algorithm implements three different easter
+ calculation methods:
+
+ 1 - Original calculation in Julian calendar, valid in
+ dates after 326 AD
+ 2 - Original method, with date converted to Gregorian
+ calendar, valid in years 1583 to 4099
+ 3 - Revised method, in Gregorian calendar, valid in
+ years 1583 to 4099 as well
+
+ These methods are represented by the constants:
+
+ * ``EASTER_JULIAN = 1``
+ * ``EASTER_ORTHODOX = 2``
+ * ``EASTER_WESTERN = 3``
+
+ The default method is method 3.
+
+ More about the algorithm may be found at:
+
+ http://users.chariot.net.au/~gmarts/eastalg.htm
+
+ and
+
+ http://www.tondering.dk/claus/calendar.html
+
+ """
+
+ if not (1 <= method <= 3):
+ raise ValueError("invalid method")
+
+ # g - Golden year - 1
+ # c - Century
+ # h - (23 - Epact) mod 30
+ # i - Number of days from March 21 to Paschal Full Moon
+ # j - Weekday for PFM (0=Sunday, etc)
+ # p - Number of days from March 21 to Sunday on or before PFM
+ # (-6 to 28 methods 1 & 3, to 56 for method 2)
+ # e - Extra days to add for method 2 (converting Julian
+ # date to Gregorian date)
+
+ y = year
+ g = y % 19
+ e = 0
+ if method < 3:
+ # Old method
+ i = (19*g + 15) % 30
+ j = (y + y//4 + i) % 7
+ if method == 2:
+ # Extra dates to convert Julian to Gregorian date
+ e = 10
+ if y > 1600:
+ e = e + y//100 - 16 - (y//100 - 16)//4
+ else:
+ # New method
+ c = y//100
+ h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
+ i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
+ j = (y + y//4 + i + 2 - c + c//4) % 7
+
+ # p can be from -6 to 56 corresponding to dates 22 March to 23 May
+ # (later dates apply to method 2, although 23 May never actually occurs)
+ p = i - j + e
+ d = 1 + (p + 27 + (p + 6)//40) % 31
+ m = 3 + (p + 26)//30
+ return datetime.date(int(y), int(m), int(d))
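A usage sketch for the three methods (the 2017 values below are what the formulas above yield):

    from dateutil.easter import easter, EASTER_JULIAN, EASTER_ORTHODOX

    easter(2017)                    # datetime.date(2017, 4, 16), Western
    easter(2017, EASTER_ORTHODOX)   # datetime.date(2017, 4, 16), coincides
    easter(2017, EASTER_JULIAN)     # datetime.date(2017, 4, 3), Julian date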
diff --git a/app/lib/dateutil/parser.py b/app/lib/dateutil/parser.py
new file mode 100644
index 0000000..147b3f2
--- /dev/null
+++ b/app/lib/dateutil/parser.py
@@ -0,0 +1,1360 @@
+# -*- coding:iso-8859-1 -*-
+"""
+This module offers a generic date/time string parser which is able to parse
+most known formats to represent a date and/or time.
+
+This module attempts to be forgiving with regards to unlikely input formats,
+returning a datetime object even for dates which are ambiguous. If an element
+of a date/time stamp is omitted, the following rules are applied:
+- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
+ on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
+ specified.
+- If a time zone is omitted, a timezone-naive datetime is returned.
+
+If any other elements are missing, they are taken from the
+:class:`datetime.datetime` object passed to the parameter ``default``. If this
+results in a day number exceeding the valid number of days per month, the
+value falls back to the end of the month.
+
+Additional resources about date/time string formats can be found below:
+
+- `A summary of the international standard date and time notation
+ <http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
+- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_
+- `Time Formats (Planetary Rings Node) <http://pds-rings.seti.org/tools/time_formats.html>`_
+- `CPAN ParseDate module
+ <http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
+- `Java SimpleDateFormat Class
+ <http://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
+"""
+from __future__ import unicode_literals
+
+import datetime
+import string
+import time
+import collections
+import re
+from io import StringIO
+from calendar import monthrange, isleap
+
+from six import text_type, binary_type, integer_types
+
+from . import relativedelta
+from . import tz
+
+__all__ = ["parse", "parserinfo"]
+
+
+class _timelex(object):
+ # Fractional seconds are sometimes split by a comma
+ _split_decimal = re.compile("([\.,])")
+
+ def __init__(self, instream):
+ if isinstance(instream, binary_type):
+ instream = instream.decode()
+
+ if isinstance(instream, text_type):
+ instream = StringIO(instream)
+
+ if getattr(instream, 'read', None) is None:
+ raise TypeError('Parser must be a string or character stream, not '
+ '{itype}'.format(itype=instream.__class__.__name__))
+
+ self.instream = instream
+ self.charstack = []
+ self.tokenstack = []
+ self.eof = False
+
+ def get_token(self):
+ """
+ This function breaks the time string into lexical units (tokens), which
+ can be parsed by the parser. Lexical units are demarcated by changes in
+ the character set, so any continuous string of letters is considered
+ one unit, as is any continuous string of numbers.
+
+ The main complication arises from the fact that dots ('.') can be used
+ both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
+ "4:30:21.447"). As such, it is necessary to read the full context of
+ any dot-separated strings before breaking it into tokens; as such, this
+ function maintains a "token stack", for when the ambiguous context
+ demands that multiple tokens be parsed at once.
+ """
+ if self.tokenstack:
+ return self.tokenstack.pop(0)
+
+ seenletters = False
+ token = None
+ state = None
+
+ while not self.eof:
+ # We only realize that we've reached the end of a token when we
+ # find a character that's not part of the current token - since
+ # that character may be part of the next token, it's stored in the
+ # charstack.
+ if self.charstack:
+ nextchar = self.charstack.pop(0)
+ else:
+ nextchar = self.instream.read(1)
+ while nextchar == '\x00':
+ nextchar = self.instream.read(1)
+
+ if not nextchar:
+ self.eof = True
+ break
+ elif not state:
+ # First character of the token - determines if we're starting
+ # to parse a word, a number or something else.
+ token = nextchar
+ if self.isword(nextchar):
+ state = 'a'
+ elif self.isnum(nextchar):
+ state = '0'
+ elif self.isspace(nextchar):
+ token = ' '
+ break # emit token
+ else:
+ break # emit token
+ elif state == 'a':
+ # If we've already started reading a word, we keep reading
+ # letters until we find something that's not part of a word.
+ seenletters = True
+ if self.isword(nextchar):
+ token += nextchar
+ elif nextchar == '.':
+ token += nextchar
+ state = 'a.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == '0':
+ # If we've already started reading a number, we keep reading
+ # numbers until we find something that doesn't fit.
+ if self.isnum(nextchar):
+ token += nextchar
+ elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
+ token += nextchar
+ state = '0.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == 'a.':
+ # If we've seen some letters and a dot separator, continue
+ # parsing, and the tokens will be broken up later.
+ seenletters = True
+ if nextchar == '.' or self.isword(nextchar):
+ token += nextchar
+ elif self.isnum(nextchar) and token[-1] == '.':
+ token += nextchar
+ state = '0.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == '0.':
+ # If we've seen at least one dot separator, keep going, we'll
+ # break up the tokens later.
+ if nextchar == '.' or self.isnum(nextchar):
+ token += nextchar
+ elif self.isword(nextchar) and token[-1] == '.':
+ token += nextchar
+ state = 'a.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+
+ if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
+ token[-1] in '.,')):
+ l = self._split_decimal.split(token)
+ token = l[0]
+ for tok in l[1:]:
+ if tok:
+ self.tokenstack.append(tok)
+
+ if state == '0.' and token.count('.') == 0:
+ token = token.replace(',', '.')
+
+ return token
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ token = self.get_token()
+ if token is None:
+ raise StopIteration
+
+ return token
+
+ def next(self):
+ return self.__next__() # Python 2.x support
+
+ @classmethod
+ def split(cls, s):
+ return list(cls(s))
+
+ @classmethod
+ def isword(cls, nextchar):
+ """ Whether or not the next character is part of a word """
+ return nextchar.isalpha()
+
+ @classmethod
+ def isnum(cls, nextchar):
+ """ Whether the next character is part of a number """
+ return nextchar.isdigit()
+
+ @classmethod
+ def isspace(cls, nextchar):
+ """ Whether the next character is whitespace """
+ return nextchar.isspace()
+
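For illustration, driving the private lexer directly shows the dot disambiguation described above: separator dots become their own tokens while a trailing decimal stays attached.

    _timelex.split("Sep.20.2009 4:30:21.447")
    # -> ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']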
+
+class _resultbase(object):
+
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+
+ def _repr(self, classname):
+ l = []
+ for attr in self.__slots__:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("%s=%s" % (attr, repr(value)))
+ return "%s(%s)" % (classname, ", ".join(l))
+
+ def __len__(self):
+ return (sum(getattr(self, attr) is not None
+ for attr in self.__slots__))
+
+ def __repr__(self):
+ return self._repr(self.__class__.__name__)
+
+
+class parserinfo(object):
+ """
+ Class which handles what inputs are accepted. Subclass this to customize
+ the language and acceptable values for each parameter.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM
+ and YMD. Default is ``False``.
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+ to be the year, otherwise the last number is taken to be the year.
+ Default is ``False``.
+ """
+
+ # m from a.m/p.m, t from ISO T separator
+ JUMP = [" ", ".", ",", ";", "-", "/", "'",
+ "at", "on", "and", "ad", "m", "t", "of",
+ "st", "nd", "rd", "th"]
+
+ WEEKDAYS = [("Mon", "Monday"),
+ ("Tue", "Tuesday"),
+ ("Wed", "Wednesday"),
+ ("Thu", "Thursday"),
+ ("Fri", "Friday"),
+ ("Sat", "Saturday"),
+ ("Sun", "Sunday")]
+ MONTHS = [("Jan", "January"),
+ ("Feb", "February"),
+ ("Mar", "March"),
+ ("Apr", "April"),
+ ("May", "May"),
+ ("Jun", "June"),
+ ("Jul", "July"),
+ ("Aug", "August"),
+ ("Sep", "Sept", "September"),
+ ("Oct", "October"),
+ ("Nov", "November"),
+ ("Dec", "December")]
+ HMS = [("h", "hour", "hours"),
+ ("m", "minute", "minutes"),
+ ("s", "second", "seconds")]
+ AMPM = [("am", "a"),
+ ("pm", "p")]
+ UTCZONE = ["UTC", "GMT", "Z"]
+ PERTAIN = ["of"]
+ TZOFFSET = {}
+
+ def __init__(self, dayfirst=False, yearfirst=False):
+ self._jump = self._convert(self.JUMP)
+ self._weekdays = self._convert(self.WEEKDAYS)
+ self._months = self._convert(self.MONTHS)
+ self._hms = self._convert(self.HMS)
+ self._ampm = self._convert(self.AMPM)
+ self._utczone = self._convert(self.UTCZONE)
+ self._pertain = self._convert(self.PERTAIN)
+
+ self.dayfirst = dayfirst
+ self.yearfirst = yearfirst
+
+ self._year = time.localtime().tm_year
+ self._century = self._year // 100 * 100
+
+ def _convert(self, lst):
+ dct = {}
+ for i, v in enumerate(lst):
+ if isinstance(v, tuple):
+ for v in v:
+ dct[v.lower()] = i
+ else:
+ dct[v.lower()] = i
+ return dct
+
+ def jump(self, name):
+ return name.lower() in self._jump
+
+ def weekday(self, name):
+ if len(name) >= 3:
+ try:
+ return self._weekdays[name.lower()]
+ except KeyError:
+ pass
+ return None
+
+ def month(self, name):
+ if len(name) >= 3:
+ try:
+ return self._months[name.lower()] + 1
+ except KeyError:
+ pass
+ return None
+
+ def hms(self, name):
+ try:
+ return self._hms[name.lower()]
+ except KeyError:
+ return None
+
+ def ampm(self, name):
+ try:
+ return self._ampm[name.lower()]
+ except KeyError:
+ return None
+
+ def pertain(self, name):
+ return name.lower() in self._pertain
+
+ def utczone(self, name):
+ return name.lower() in self._utczone
+
+ def tzoffset(self, name):
+ if name in self._utczone:
+ return 0
+
+ return self.TZOFFSET.get(name)
+
+ def convertyear(self, year, century_specified=False):
+ if year < 100 and not century_specified:
+ year += self._century
+ if abs(year - self._year) >= 50:
+ if year < self._year:
+ year += 100
+ else:
+ year -= 100
+ return year
+
+ def validate(self, res):
+ # move to info
+ if res.year is not None:
+ res.year = self.convertyear(res.year, res.century_specified)
+
+ if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
+ res.tzname = "UTC"
+ res.tzoffset = 0
+ elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
+ res.tzoffset = 0
+ return True
+
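The intended customization path is subclassing; a hedged sketch (the German vocabulary here is illustrative, and note that weekday()/month() never match names shorter than three characters):

    class GermanParserInfo(parserinfo):
        WEEKDAYS = [("Mo", "Montag"), ("Di", "Dienstag"),
                    ("Mi", "Mittwoch"), ("Do", "Donnerstag"),
                    ("Fr", "Freitag"), ("Sa", "Samstag"), ("So", "Sonntag")]

    # parser(GermanParserInfo()).parse("Dienstag, 13.05.2017") would then
    # resolve the weekday token that the default English tables reject.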
+
+class _ymd(list):
+ def __init__(self, tzstr, *args, **kwargs):
+ super(self.__class__, self).__init__(*args, **kwargs)
+ self.century_specified = False
+ self.tzstr = tzstr
+
+ @staticmethod
+ def token_could_be_year(token, year):
+ try:
+ return int(token) == year
+ except ValueError:
+ return False
+
+ @staticmethod
+ def find_potential_year_tokens(year, tokens):
+ return [token for token in tokens if _ymd.token_could_be_year(token, year)]
+
+ def find_probable_year_index(self, tokens):
+ """
+ Attempt to deduce if a pre-100 year was lost
+ due to padded zeros being stripped off.
+ """
+ for index, token in enumerate(self):
+ potential_year_tokens = _ymd.find_potential_year_tokens(token, tokens)
+ if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2:
+ return index
+
+ def append(self, val):
+ if hasattr(val, '__len__'):
+ if val.isdigit() and len(val) > 2:
+ self.century_specified = True
+ elif val > 100:
+ self.century_specified = True
+
+ super(self.__class__, self).append(int(val))
+
+ def resolve_ymd(self, mstridx, yearfirst, dayfirst):
+ len_ymd = len(self)
+ year, month, day = (None, None, None)
+
+ if len_ymd > 3:
+ raise ValueError("More than three YMD values")
+ elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
+ # One member, or two members with a month string
+ if mstridx != -1:
+ month = self[mstridx]
+ del self[mstridx]
+
+ if len_ymd > 1 or mstridx == -1:
+ if self[0] > 31:
+ year = self[0]
+ else:
+ day = self[0]
+
+ elif len_ymd == 2:
+ # Two members with numbers
+ if self[0] > 31:
+ # 99-01
+ year, month = self
+ elif self[1] > 31:
+ # 01-99
+ month, year = self
+ elif dayfirst and self[1] <= 12:
+ # 13-01
+ day, month = self
+ else:
+ # 01-13
+ month, day = self
+
+ elif len_ymd == 3:
+ # Three members
+ if mstridx == 0:
+ month, day, year = self
+ elif mstridx == 1:
+ if self[0] > 31 or (yearfirst and self[2] <= 31):
+ # 99-Jan-01
+ year, month, day = self
+ else:
+ # 01-Jan-01
+ # Give precedence to day-first, since
+ # two-digit years are usually hand-written.
+ day, month, year = self
+
+ elif mstridx == 2:
+ # WTF!?
+ if self[1] > 31:
+ # 01-99-Jan
+ day, year, month = self
+ else:
+ # 99-01-Jan
+ year, day, month = self
+
+ else:
+ if self[0] > 31 or \
+ self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \
+ (yearfirst and self[1] <= 12 and self[2] <= 31):
+ # 99-01-01
+ if dayfirst and self[2] <= 12:
+ year, day, month = self
+ else:
+ year, month, day = self
+ elif self[0] > 12 or (dayfirst and self[1] <= 12):
+ # 13-01-01
+ day, month, year = self
+ else:
+ # 01-13-01
+ month, day, year = self
+
+ return year, month, day
+
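A sketch of the container in action (calling the private class directly; the date string is made up):

    ymd = _ymd("25.9.2003")            # tzstr is the original time string
    for tok in ("25", "9", "2003"):
        ymd.append(tok)                # '2003' marks century_specified
    ymd.resolve_ymd(mstridx=-1, yearfirst=False, dayfirst=False)
    # -> (2003, 9, 25): 25 > 12 forces the day slot, 2003 takes the year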
+
+class parser(object):
+ def __init__(self, info=None):
+ self.info = info or parserinfo()
+
+ def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs):
+ """
+ Parse the date/time string into a :class:`datetime.datetime` object.
+
+ :param timestr:
+ Any date/time string using the supported formats.
+
+ :param default:
+ The default datetime object, if this is a datetime object and not
+ ``None``, elements specified in ``timestr`` replace elements in the
+ default object.
+
+ :param ignoretz:
+ If set ``True``, time zones in parsed strings are ignored and a
+ naive :class:`datetime.datetime` object is returned.
+
+ :param tzinfos:
+ Additional time zone names / aliases which may be present in the
+ string. This argument maps time zone names (and optionally offsets
+ from those time zones) to time zones. This parameter can be a
+ dictionary with timezone aliases mapping time zone names to time
+ zones or a function taking two parameters (``tzname`` and
+ ``tzoffset``) and returning a time zone.
+
+ The timezones to which the names are mapped can be an integer
+ offset from UTC in minutes or a :class:`tzinfo` object.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> from dateutil.parser import parse
+ >>> from dateutil.tz import gettz
+ >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
+ >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
+ >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21,
+ tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+ This parameter is ignored if ``ignoretz`` is set.
+
+ :param **kwargs:
+ Keyword arguments as passed to ``_parse()``.
+
+ :return:
+ Returns a :class:`datetime.datetime` object or, if the
+ ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+ first element being a :class:`datetime.datetime` object, the second
+ a tuple containing the fuzzy tokens.
+
+ :raises ValueError:
+ Raised for invalid or unknown string format, if the provided
+ :class:`tzinfo` is not in a valid format, or if an invalid date
+ would be created.
+
+ :raises OverflowError:
+ Raised if the parsed date exceeds the largest valid C integer on
+ your system.
+ """
+
+ if default is None:
+ effective_dt = datetime.datetime.now()
+ default = datetime.datetime.now().replace(hour=0, minute=0,
+ second=0, microsecond=0)
+ else:
+ effective_dt = default
+
+ res, skipped_tokens = self._parse(timestr, **kwargs)
+
+ if res is None:
+ raise ValueError("Unknown string format")
+
+ if len(res) == 0:
+ raise ValueError("String does not contain a date.")
+
+ repl = {}
+ for attr in ("year", "month", "day", "hour",
+ "minute", "second", "microsecond"):
+ value = getattr(res, attr)
+ if value is not None:
+ repl[attr] = value
+
+ if 'day' not in repl:
+ # If the default day exceeds the last day of the month, fall back to
+ # the end of the month.
+ cyear = default.year if res.year is None else res.year
+ cmonth = default.month if res.month is None else res.month
+ cday = default.day if res.day is None else res.day
+
+ if cday > monthrange(cyear, cmonth)[1]:
+ repl['day'] = monthrange(cyear, cmonth)[1]
+
+ ret = default.replace(**repl)
+
+ if res.weekday is not None and not res.day:
+ ret = ret+relativedelta.relativedelta(weekday=res.weekday)
+
+ if not ignoretz:
+ if (isinstance(tzinfos, collections.Callable) or
+ tzinfos and res.tzname in tzinfos):
+
+ if isinstance(tzinfos, collections.Callable):
+ tzdata = tzinfos(res.tzname, res.tzoffset)
+ else:
+ tzdata = tzinfos.get(res.tzname)
+
+ if isinstance(tzdata, datetime.tzinfo):
+ tzinfo = tzdata
+ elif isinstance(tzdata, text_type):
+ tzinfo = tz.tzstr(tzdata)
+ elif isinstance(tzdata, integer_types):
+ tzinfo = tz.tzoffset(res.tzname, tzdata)
+ else:
+ raise ValueError("Offset must be tzinfo subclass, "
+ "tz string, or int offset.")
+ ret = ret.replace(tzinfo=tzinfo)
+ elif res.tzname and res.tzname in time.tzname:
+ ret = ret.replace(tzinfo=tz.tzlocal())
+ elif res.tzoffset == 0:
+ ret = ret.replace(tzinfo=tz.tzutc())
+ elif res.tzoffset:
+ ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
+
+ if kwargs.get('fuzzy_with_tokens', False):
+ return ret, skipped_tokens
+ else:
+ return ret
+
+ class _result(_resultbase):
+ __slots__ = ["year", "month", "day", "weekday",
+ "hour", "minute", "second", "microsecond",
+ "tzname", "tzoffset", "ampm"]
+
+ def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
+ fuzzy_with_tokens=False):
+ """
+ Private method which performs the heavy lifting of parsing, called from
+ ``parse()``, which passes on its ``kwargs`` to this function.
+
+ :param timestr:
+ The string to parse.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM
+ and YMD. If set to ``None``, this value is retrieved from the
+ current :class:`parserinfo` object (which itself defaults to
+ ``False``).
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+ to be the year, otherwise the last number is taken to be the year.
+ If this is set to ``None``, the value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param fuzzy:
+ Whether to allow fuzzy parsing, allowing for string like "Today is
+ January 1, 2047 at 8:21:00AM".
+
+ :param fuzzy_with_tokens:
+ If ``True``, ``fuzzy`` is automatically set to True, and the parser
+ will return a tuple where the first element is the parsed
+ :class:`datetime.datetime` datetimestamp and the second element is
+ a tuple containing the portions of the string which were ignored:
+
+ .. doctest::
+
+ >>> from dateutil.parser import parse
+ >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+ (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+ """
+ if fuzzy_with_tokens:
+ fuzzy = True
+
+ info = self.info
+
+ if dayfirst is None:
+ dayfirst = info.dayfirst
+
+ if yearfirst is None:
+ yearfirst = info.yearfirst
+
+ res = self._result()
+ l = _timelex.split(timestr) # Splits the timestr into tokens
+
+ # keep up with the last token skipped so we can recombine
+ # consecutively skipped tokens (-2 for when i begins at 0).
+ last_skipped_token_i = -2
+ skipped_tokens = list()
+
+ try:
+ # year/month/day list
+ ymd = _ymd(timestr)
+
+ # Index of the month string in ymd
+ mstridx = -1
+
+ len_l = len(l)
+ i = 0
+ while i < len_l:
+
+ # Check if it's a number
+ try:
+ value_repr = l[i]
+ value = float(value_repr)
+ except ValueError:
+ value = None
+
+ if value is not None:
+ # Token is a number
+ len_li = len(l[i])
+ i += 1
+
+ if (len(ymd) == 3 and len_li in (2, 4)
+ and res.hour is None and (i >= len_l or (l[i] != ':' and
+ info.hms(l[i]) is None))):
+ # 19990101T23[59]
+ s = l[i-1]
+ res.hour = int(s[:2])
+
+ if len_li == 4:
+ res.minute = int(s[2:])
+
+ elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
+ # YYMMDD or HHMMSS[.ss]
+ s = l[i-1]
+
+ if not ymd and l[i-1].find('.') == -1:
+ #ymd.append(info.convertyear(int(s[:2])))
+
+ ymd.append(s[:2])
+ ymd.append(s[2:4])
+ ymd.append(s[4:])
+ else:
+ # 19990101T235959[.59]
+ res.hour = int(s[:2])
+ res.minute = int(s[2:4])
+ res.second, res.microsecond = _parsems(s[4:])
+
+ elif len_li in (8, 12, 14):
+ # YYYYMMDD
+ s = l[i-1]
+ ymd.append(s[:4])
+ ymd.append(s[4:6])
+ ymd.append(s[6:8])
+
+ if len_li > 8:
+ res.hour = int(s[8:10])
+ res.minute = int(s[10:12])
+
+ if len_li > 12:
+ res.second = int(s[12:])
+
+ elif ((i < len_l and info.hms(l[i]) is not None) or
+ (i+1 < len_l and l[i] == ' ' and
+ info.hms(l[i+1]) is not None)):
+
+ # HH[ ]h or MM[ ]m or SS[.ss][ ]s
+ if l[i] == ' ':
+ i += 1
+
+ idx = info.hms(l[i])
+
+ while True:
+ if idx == 0:
+ res.hour = int(value)
+
+ if value % 1:
+ res.minute = int(60*(value % 1))
+
+ elif idx == 1:
+ res.minute = int(value)
+
+ if value % 1:
+ res.second = int(60*(value % 1))
+
+ elif idx == 2:
+ res.second, res.microsecond = \
+ _parsems(value_repr)
+
+ i += 1
+
+ if i >= len_l or idx == 2:
+ break
+
+ # 12h00
+ try:
+ value_repr = l[i]
+ value = float(value_repr)
+ except ValueError:
+ break
+ else:
+ i += 1
+ idx += 1
+
+ if i < len_l:
+ newidx = info.hms(l[i])
+
+ if newidx is not None:
+ idx = newidx
+
+ elif (i == len_l and l[i-2] == ' ' and
+ info.hms(l[i-3]) is not None):
+ # X h MM or X m SS
+ idx = info.hms(l[i-3]) + 1
+
+ if idx == 1:
+ res.minute = int(value)
+
+ if value % 1:
+ res.second = int(60*(value % 1))
+ elif idx == 2:
+ res.second, res.microsecond = \
+ _parsems(value_repr)
+ i += 1
+
+ elif i+1 < len_l and l[i] == ':':
+ # HH:MM[:SS[.ss]]
+ res.hour = int(value)
+ i += 1
+ value = float(l[i])
+ res.minute = int(value)
+
+ if value % 1:
+ res.second = int(60*(value % 1))
+
+ i += 1
+
+ if i < len_l and l[i] == ':':
+ res.second, res.microsecond = _parsems(l[i+1])
+ i += 2
+
+ elif i < len_l and l[i] in ('-', '/', '.'):
+ sep = l[i]
+ ymd.append(value_repr)
+ i += 1
+
+ if i < len_l and not info.jump(l[i]):
+ try:
+ # 01-01[-01]
+ ymd.append(l[i])
+ except ValueError:
+ # 01-Jan[-01]
+ value = info.month(l[i])
+
+ if value is not None:
+ ymd.append(value)
+ assert mstridx == -1
+ mstridx = len(ymd)-1
+ else:
+ return None, None
+
+ i += 1
+
+ if i < len_l and l[i] == sep:
+ # We have three members
+ i += 1
+ value = info.month(l[i])
+
+ if value is not None:
+ ymd.append(value)
+ assert mstridx == -1
+ mstridx = len(ymd)-1
+ else:
+ ymd.append(l[i])
+
+ i += 1
+ elif i >= len_l or info.jump(l[i]):
+ if i+1 < len_l and info.ampm(l[i+1]) is not None:
+ # 12 am
+ res.hour = int(value)
+
+ if res.hour < 12 and info.ampm(l[i+1]) == 1:
+ res.hour += 12
+ elif res.hour == 12 and info.ampm(l[i+1]) == 0:
+ res.hour = 0
+
+ i += 1
+ else:
+ # Year, month or day
+ ymd.append(value)
+ i += 1
+ elif info.ampm(l[i]) is not None:
+
+ # 12am
+ res.hour = int(value)
+
+ if res.hour < 12 and info.ampm(l[i]) == 1:
+ res.hour += 12
+ elif res.hour == 12 and info.ampm(l[i]) == 0:
+ res.hour = 0
+ i += 1
+
+ elif not fuzzy:
+ return None, None
+ else:
+ i += 1
+ continue
+
+ # Check weekday
+ value = info.weekday(l[i])
+ if value is not None:
+ res.weekday = value
+ i += 1
+ continue
+
+ # Check month name
+ value = info.month(l[i])
+ if value is not None:
+ ymd.append(value)
+ assert mstridx == -1
+ mstridx = len(ymd)-1
+
+ i += 1
+ if i < len_l:
+ if l[i] in ('-', '/'):
+ # Jan-01[-99]
+ sep = l[i]
+ i += 1
+ ymd.append(l[i])
+ i += 1
+
+ if i < len_l and l[i] == sep:
+ # Jan-01-99
+ i += 1
+ ymd.append(l[i])
+ i += 1
+
+ elif (i+3 < len_l and l[i] == l[i+2] == ' '
+ and info.pertain(l[i+1])):
+ # Jan of 01
+ # In this case, 01 is clearly year
+ try:
+ value = int(l[i+3])
+ except ValueError:
+ # Wrong guess
+ pass
+ else:
+ # Convert it here to become unambiguous
+ ymd.append(str(info.convertyear(value)))
+ i += 4
+ continue
+
+ # Check am/pm
+ value = info.ampm(l[i])
+ if value is not None:
+ # For fuzzy parsing, 'a' or 'am' (both valid English words)
+ # may erroneously trigger the AM/PM flag. Deal with that
+ # here.
+ val_is_ampm = True
+
+ # If there's already an AM/PM flag, this one isn't one.
+ if fuzzy and res.ampm is not None:
+ val_is_ampm = False
+
+ # If AM/PM is found and hour is not, raise a ValueError
+ if res.hour is None:
+ if fuzzy:
+ val_is_ampm = False
+ else:
+ raise ValueError('No hour specified with ' +
+ 'AM or PM flag.')
+ elif not 0 <= res.hour <= 12:
+ # If AM/PM is found, it's a 12 hour clock, so raise
+ # an error for invalid range
+ if fuzzy:
+ val_is_ampm = False
+ else:
+ raise ValueError('Invalid hour specified for ' +
+ '12-hour clock.')
+
+ if val_is_ampm:
+ if value == 1 and res.hour < 12:
+ res.hour += 12
+ elif value == 0 and res.hour == 12:
+ res.hour = 0
+
+ res.ampm = value
+
+ i += 1
+ continue
+
+ # Check for a timezone name
+ if (res.hour is not None and len(l[i]) <= 5 and
+ res.tzname is None and res.tzoffset is None and
+ not [x for x in l[i] if x not in
+ string.ascii_uppercase]):
+ res.tzname = l[i]
+ res.tzoffset = info.tzoffset(res.tzname)
+ i += 1
+
+ # Check for something like GMT+3, or BRST+3. Notice
+ # that it doesn't mean "I am 3 hours after GMT", but
+ # "my time +3 is GMT". If found, we reverse the
+ # logic so that timezone parsing code will get it
+ # right.
+ if i < len_l and l[i] in ('+', '-'):
+ l[i] = ('+', '-')[l[i] == '+']
+ res.tzoffset = None
+ if info.utczone(res.tzname):
+ # With something like GMT+3, the timezone
+ # is *not* GMT.
+ res.tzname = None
+
+ continue
+
+ # Check for a numbered timezone
+ if res.hour is not None and l[i] in ('+', '-'):
+ signal = (-1, 1)[l[i] == '+']
+ i += 1
+ len_li = len(l[i])
+
+ if len_li == 4:
+ # -0300
+ res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
+ elif i+1 < len_l and l[i+1] == ':':
+ # -03:00
+ res.tzoffset = int(l[i])*3600+int(l[i+2])*60
+ i += 2
+ elif len_li <= 2:
+ # -[0]3
+ res.tzoffset = int(l[i][:2])*3600
+ else:
+ return None, None
+ i += 1
+
+ res.tzoffset *= signal
+
+ # Look for a timezone name between parenthesis
+ if (i+3 < len_l and
+ info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
+ 3 <= len(l[i+2]) <= 5 and
+ not [x for x in l[i+2]
+ if x not in string.ascii_uppercase]):
+ # -0300 (BRST)
+ res.tzname = l[i+2]
+ i += 4
+ continue
+
+ # Check jumps
+ if not (info.jump(l[i]) or fuzzy):
+ return None, None
+
+ if last_skipped_token_i == i - 1:
+ # recombine the tokens
+ skipped_tokens[-1] += l[i]
+ else:
+ # just append
+ skipped_tokens.append(l[i])
+ last_skipped_token_i = i
+ i += 1
+
+ # Process year/month/day
+ year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
+ if year is not None:
+ res.year = year
+ res.century_specified = ymd.century_specified
+
+ if month is not None:
+ res.month = month
+
+ if day is not None:
+ res.day = day
+
+ except (IndexError, ValueError, AssertionError):
+ return None, None
+
+ if not info.validate(res):
+ return None, None
+
+ if fuzzy_with_tokens:
+ return res, tuple(skipped_tokens)
+ else:
+ return res, None
+
+DEFAULTPARSER = parser()
+
+
+def parse(timestr, parserinfo=None, **kwargs):
+ """
+
+ Parse a string in one of the supported formats, using the
+ ``parserinfo`` parameters.
+
+ :param timestr:
+ A string containing a date/time stamp.
+
+ :param parserinfo:
+ A :class:`parserinfo` object containing parameters for the parser.
+ If ``None``, the default arguments to the :class:`parserinfo`
+ constructor are used.
+
+ The ``**kwargs`` parameter takes the following keyword arguments:
+
+ :param default:
+ The default datetime object, if this is a datetime object and not
+ ``None``, elements specified in ``timestr`` replace elements in the
+ default object.
+
+ :param ignoretz:
+ If set ``True``, time zones in parsed strings are ignored and a naive
+ :class:`datetime` object is returned.
+
+ :param tzinfos:
+ Additional time zone names / aliases which may be present in the
+ string. This argument maps time zone names (and optionally offsets
+ from those time zones) to time zones. This parameter can be a
+ dictionary with timezone aliases mapping time zone names to time
+ zones or a function taking two parameters (``tzname`` and
+ ``tzoffset``) and returning a time zone.
+
+ The timezones to which the names are mapped can be an integer
+ offset from UTC in minutes or a :class:`tzinfo` object.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> from dateutil.parser import parse
+ >>> from dateutil.tz import gettz
+ >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")}
+ >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800))
+ >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21,
+ tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+ This parameter is ignored if ``ignoretz`` is set.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM and
+ YMD. If set to ``None``, this value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
+ be the year, otherwise the last number is taken to be the year. If
+ this is set to ``None``, the value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param fuzzy:
+ Whether to allow fuzzy parsing, allowing for string like "Today is
+ January 1, 2047 at 8:21:00AM".
+
+ :param fuzzy_with_tokens:
+ If ``True``, ``fuzzy`` is automatically set to True, and the parser
+ will return a tuple where the first element is the parsed
+ :class:`datetime.datetime` datetimestamp and the second element is
+ a tuple containing the portions of the string which were ignored:
+
+ .. doctest::
+
+ >>> from dateutil.parser import parse
+ >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+ (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+ :return:
+ Returns a :class:`datetime.datetime` object or, if the
+ ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+ first element being a :class:`datetime.datetime` object, the second
+ a tuple containing the fuzzy tokens.
+
+ :raises ValueError:
+ Raised for invalid or unknown string format, if the provided
+ :class:`tzinfo` is not in a valid format, or if an invalid date
+ would be created.
+
+ :raises OverflowError:
+ Raised if the parsed date exceeds the largest valid C integer on
+ your system.
+ """
+ if parserinfo:
+ return parser(parserinfo).parse(timestr, **kwargs)
+ else:
+ return DEFAULTPARSER.parse(timestr, **kwargs)
+
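Typical entry-point usage, consistent with the docstring above:

    from dateutil.parser import parse

    parse("2003-09-25T10:49:41")             # ISO-8601
    parse("25/09/03 10:49", dayfirst=True)   # ambiguity resolved by flag
    parse("Today is January 1, 2047 at 8:21:00AM", fuzzy=True)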
+
+class _tzparser(object):
+
+ class _result(_resultbase):
+
+ __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
+ "start", "end"]
+
+ class _attr(_resultbase):
+ __slots__ = ["month", "week", "weekday",
+ "yday", "jyday", "day", "time"]
+
+ def __repr__(self):
+ return self._repr("")
+
+ def __init__(self):
+ _resultbase.__init__(self)
+ self.start = self._attr()
+ self.end = self._attr()
+
+ def parse(self, tzstr):
+ res = self._result()
+ l = _timelex.split(tzstr)
+ try:
+
+ len_l = len(l)
+
+ i = 0
+ while i < len_l:
+ # BRST+3[BRDT[+2]]
+ j = i
+ while j < len_l and not [x for x in l[j]
+ if x in "0123456789:,-+"]:
+ j += 1
+ if j != i:
+ if not res.stdabbr:
+ offattr = "stdoffset"
+ res.stdabbr = "".join(l[i:j])
+ else:
+ offattr = "dstoffset"
+ res.dstabbr = "".join(l[i:j])
+ i = j
+ if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
+ "0123456789")):
+ if l[i] in ('+', '-'):
+ # Yes, that's right. See the TZ variable
+ # documentation.
+ signal = (1, -1)[l[i] == '+']
+ i += 1
+ else:
+ signal = -1
+ len_li = len(l[i])
+ if len_li == 4:
+ # -0300
+ setattr(res, offattr, (int(l[i][:2])*3600 +
+ int(l[i][2:])*60)*signal)
+ elif i+1 < len_l and l[i+1] == ':':
+ # -03:00
+ setattr(res, offattr,
+ (int(l[i])*3600+int(l[i+2])*60)*signal)
+ i += 2
+ elif len_li <= 2:
+ # -[0]3
+ setattr(res, offattr,
+ int(l[i][:2])*3600*signal)
+ else:
+ return None
+ i += 1
+ if res.dstabbr:
+ break
+ else:
+ break
+
+ if i < len_l:
+ for j in range(i, len_l):
+ if l[j] == ';':
+ l[j] = ','
+
+ assert l[i] == ','
+
+ i += 1
+
+ if i >= len_l:
+ pass
+ elif (8 <= l.count(',') <= 9 and
+ not [y for x in l[i:] if x != ','
+ for y in x if y not in "0123456789"]):
+ # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
+ for x in (res.start, res.end):
+ x.month = int(l[i])
+ i += 2
+ if l[i] == '-':
+ value = int(l[i+1])*-1
+ i += 1
+ else:
+ value = int(l[i])
+ i += 2
+ if value:
+ x.week = value
+ x.weekday = (int(l[i])-1) % 7
+ else:
+ x.day = int(l[i])
+ i += 2
+ x.time = int(l[i])
+ i += 2
+ if i < len_l:
+ if l[i] in ('-', '+'):
+ signal = (-1, 1)[l[i] == "+"]
+ i += 1
+ else:
+ signal = 1
+ res.dstoffset = (res.stdoffset+int(l[i]))*signal
+ elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
+ not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
+ '.', '-', ':')
+ for y in x if y not in "0123456789"]):
+ for x in (res.start, res.end):
+ if l[i] == 'J':
+ # non-leap year day (1 based)
+ i += 1
+ x.jyday = int(l[i])
+ elif l[i] == 'M':
+ # month[-.]week[-.]weekday
+ i += 1
+ x.month = int(l[i])
+ i += 1
+ assert l[i] in ('-', '.')
+ i += 1
+ x.week = int(l[i])
+ if x.week == 5:
+ x.week = -1
+ i += 1
+ assert l[i] in ('-', '.')
+ i += 1
+ x.weekday = (int(l[i])-1) % 7
+ else:
+ # year day (zero based)
+ x.yday = int(l[i])+1
+
+ i += 1
+
+ if i < len_l and l[i] == '/':
+ i += 1
+ # start time
+ len_li = len(l[i])
+ if len_li == 4:
+ # -0300
+ x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
+ elif i+1 < len_l and l[i+1] == ':':
+ # -03:00
+ x.time = int(l[i])*3600+int(l[i+2])*60
+ i += 2
+ if i+1 < len_l and l[i+1] == ':':
+ i += 2
+ x.time += int(l[i])
+ elif len_li <= 2:
+ # -[0]3
+ x.time = (int(l[i][:2])*3600)
+ else:
+ return None
+ i += 1
+
+ assert i == len_l or l[i] == ','
+
+ i += 1
+
+ assert i >= len_l
+
+ except (IndexError, ValueError, AssertionError):
+ return None
+
+ return res
+
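A sketch of the POSIX-style TZ strings this private parser handles:

    res = _tzparser().parse("EST5EDT,M3.2.0/2,M11.1.0/2")
    # res.stdabbr == 'EST', res.stdoffset == -18000 (seconds)
    # res.dstabbr == 'EDT', res.start.month == 3, res.end.month == 11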
+
+DEFAULTTZPARSER = _tzparser()
+
+
+def _parsetz(tzstr):
+ return DEFAULTTZPARSER.parse(tzstr)
+
+
+def _parsems(value):
+ """Parse a I[.F] seconds value into (seconds, microseconds)."""
+ if "." not in value:
+ return int(value), 0
+ else:
+ i, f = value.split(".")
+ return int(i), int(f.ljust(6, "0")[:6])
+
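For example, the fractional part is right-padded to microsecond precision:

    _parsems("21.447")   # -> (21, 447000)
    _parsems("59")       # -> (59, 0)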
+
+# vim:ts=4:sw=4:et
diff --git a/app/lib/dateutil/relativedelta.py b/app/lib/dateutil/relativedelta.py
new file mode 100644
index 0000000..7e3bd12
--- /dev/null
+++ b/app/lib/dateutil/relativedelta.py
@@ -0,0 +1,531 @@
+# -*- coding: utf-8 -*-
+import datetime
+import calendar
+
+import operator
+from math import copysign
+
+from six import integer_types
+from warnings import warn
+
+from ._common import weekday
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
+
+__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+
+class relativedelta(object):
+ """
+ The relativedelta type is based on the specification of the excellent
+ work done by M.-A. Lemburg in his
+ `mx.DateTime <http://www.egenix.com/files/python/mxDateTime.html>`_ extension.
+ However, notice that this type does *NOT* implement the same algorithm as
+ his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
+
+ There are two different ways to build a relativedelta instance. The
+ first one is passing it two date/datetime classes::
+
+ relativedelta(datetime1, datetime2)
+
+ The second one is passing it any number of the following keyword arguments::
+
+ relativedelta(arg1=x,arg2=y,arg3=z...)
+
+ year, month, day, hour, minute, second, microsecond:
+ Absolute information (argument is singular); adding or subtracting a
+ relativedelta with absolute information does not perform an arithmetic
+ operation, but rather REPLACES the corresponding value in the
+ original datetime with the value(s) in relativedelta.
+
+ years, months, weeks, days, hours, minutes, seconds, microseconds:
+ Relative information, may be negative (argument is plural); adding
+ or subtracting a relativedelta with relative information performs
+ the corresponding arithmetic operation on the original datetime value
+ with the information in the relativedelta.
+
+ weekday:
+ One of the weekday instances (MO, TU, etc). These instances may
+ receive a parameter N, specifying the Nth weekday, which could
+ be positive or negative (like MO(+1) or MO(-2)). Not specifying
+ it is the same as specifying +1. You can also use an integer,
+ where 0=MO.
+
+ leapdays:
+ Will add given days to the date found, if the year is a leap
+ year, and the date found is after the 28th of February.
+
+ yearday, nlyearday:
+ Set the yearday or the non-leap year day (jump leap days).
+ These are converted to day/month/leapdays information.
+
+ Here is the behavior of operations with relativedelta:
+
+ 1. Calculate the absolute year, using the 'year' argument, or the
+ original datetime year, if the argument is not present.
+
+ 2. Add the relative 'years' argument to the absolute year.
+
+ 3. Do steps 1 and 2 for month/months.
+
+ 4. Calculate the absolute day, using the 'day' argument, or the
+ original datetime day, if the argument is not present. Then,
+ subtract from the day until it fits in the year and month
+ found after their operations.
+
+ 5. Add the relative 'days' argument to the absolute day. Notice
+ that the 'weeks' argument is multiplied by 7 and added to
+ 'days'.
+
+ 6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
+ microsecond/microseconds.
+
+ 7. If the 'weekday' argument is present, calculate the weekday,
+ with the given (wday, nth) tuple. wday is the index of the
+ weekday (0-6, 0=Mon), and nth is the number of weeks to add
+ forward or backward, depending on its signal. Notice that if
+ the calculated date is already Monday, for example, using
+ (0, 1) or (0, -1) won't change the day.
+ """
+
+ def __init__(self, dt1=None, dt2=None,
+ years=0, months=0, days=0, leapdays=0, weeks=0,
+ hours=0, minutes=0, seconds=0, microseconds=0,
+ year=None, month=None, day=None, weekday=None,
+ yearday=None, nlyearday=None,
+ hour=None, minute=None, second=None, microsecond=None):
+
+ # Check for non-integer values in integer-only quantities
+ if any(x is not None and x != int(x) for x in (years, months)):
+ raise ValueError("Non-integer years and months are "
+ "ambiguous and not currently supported.")
+
+ if dt1 and dt2:
+ # datetime is a subclass of date. So both must be date
+ if not (isinstance(dt1, datetime.date) and
+ isinstance(dt2, datetime.date)):
+ raise TypeError("relativedelta only diffs datetime/date")
+
+ # We allow two dates, or two datetimes, so we coerce them to be
+ # of the same type
+ if (isinstance(dt1, datetime.datetime) !=
+ isinstance(dt2, datetime.datetime)):
+ if not isinstance(dt1, datetime.datetime):
+ dt1 = datetime.datetime.fromordinal(dt1.toordinal())
+ elif not isinstance(dt2, datetime.datetime):
+ dt2 = datetime.datetime.fromordinal(dt2.toordinal())
+
+ self.years = 0
+ self.months = 0
+ self.days = 0
+ self.leapdays = 0
+ self.hours = 0
+ self.minutes = 0
+ self.seconds = 0
+ self.microseconds = 0
+ self.year = None
+ self.month = None
+ self.day = None
+ self.weekday = None
+ self.hour = None
+ self.minute = None
+ self.second = None
+ self.microsecond = None
+ self._has_time = 0
+
+ # Get year / month delta between the two
+ months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
+ self._set_months(months)
+
+ # Remove the year/month delta so the timedelta is just well-defined
+ # time units (seconds, days and microseconds)
+ dtm = self.__radd__(dt2)
+
+ # If we've overshot our target, make an adjustment
+ if dt1 < dt2:
+ compare = operator.gt
+ increment = 1
+ else:
+ compare = operator.lt
+ increment = -1
+
+ while compare(dt1, dtm):
+ months += increment
+ self._set_months(months)
+ dtm = self.__radd__(dt2)
+
+ # Get the timedelta between the "months-adjusted" date and dt1
+ delta = dt1 - dtm
+ self.seconds = delta.seconds + delta.days * 86400
+ self.microseconds = delta.microseconds
+ else:
+ # Relative information
+ self.years = years
+ self.months = months
+ self.days = days + weeks * 7
+ self.leapdays = leapdays
+ self.hours = hours
+ self.minutes = minutes
+ self.seconds = seconds
+ self.microseconds = microseconds
+
+ # Absolute information
+ self.year = year
+ self.month = month
+ self.day = day
+ self.hour = hour
+ self.minute = minute
+ self.second = second
+ self.microsecond = microsecond
+
+ if any(x is not None and int(x) != x
+ for x in (year, month, day, hour,
+ minute, second, microsecond)):
+ # For now we'll deprecate floats - later it'll be an error.
+ warn("Non-integer value passed as absolute information. " +
+ "This is not a well-defined condition and will raise " +
+ "errors in future versions.", DeprecationWarning)
+
+
+ if isinstance(weekday, integer_types):
+ self.weekday = weekdays[weekday]
+ else:
+ self.weekday = weekday
+
+ yday = 0
+ if nlyearday:
+ yday = nlyearday
+ elif yearday:
+ yday = yearday
+ if yearday > 59:
+ self.leapdays = -1
+ if yday:
+ ydayidx = [31, 59, 90, 120, 151, 181, 212,
+ 243, 273, 304, 334, 366]
+ for idx, ydays in enumerate(ydayidx):
+ if yday <= ydays:
+ self.month = idx+1
+ if idx == 0:
+ self.day = yday
+ else:
+ self.day = yday-ydayidx[idx-1]
+ break
+ else:
+ raise ValueError("invalid year day (%d)" % yday)
+
+ self._fix()
+
+ def _fix(self):
+ if abs(self.microseconds) > 999999:
+ s = _sign(self.microseconds)
+ div, mod = divmod(self.microseconds * s, 1000000)
+ self.microseconds = mod * s
+ self.seconds += div * s
+ if abs(self.seconds) > 59:
+ s = _sign(self.seconds)
+ div, mod = divmod(self.seconds * s, 60)
+ self.seconds = mod * s
+ self.minutes += div * s
+ if abs(self.minutes) > 59:
+ s = _sign(self.minutes)
+ div, mod = divmod(self.minutes * s, 60)
+ self.minutes = mod * s
+ self.hours += div * s
+ if abs(self.hours) > 23:
+ s = _sign(self.hours)
+ div, mod = divmod(self.hours * s, 24)
+ self.hours = mod * s
+ self.days += div * s
+ if abs(self.months) > 11:
+ s = _sign(self.months)
+ div, mod = divmod(self.months * s, 12)
+ self.months = mod * s
+ self.years += div * s
+ if (self.hours or self.minutes or self.seconds or self.microseconds
+ or self.hour is not None or self.minute is not None or
+ self.second is not None or self.microsecond is not None):
+ self._has_time = 1
+ else:
+ self._has_time = 0
+
+ @property
+ def weeks(self):
+ return self.days // 7
+ @weeks.setter
+ def weeks(self, value):
+ self.days = self.days - (self.weeks * 7) + value * 7
+
+ def _set_months(self, months):
+ self.months = months
+ if abs(self.months) > 11:
+ s = _sign(self.months)
+ div, mod = divmod(self.months * s, 12)
+ self.months = mod * s
+ self.years = div * s
+ else:
+ self.years = 0
+
+ def normalized(self):
+ """
+ Return a version of this object represented entirely using integer
+ values for the relative attributes.
+
+ >>> relativedelta(days=1.5, hours=2).normalized()
+ relativedelta(days=1, hours=14)
+
+ :return:
+ Returns a :class:`dateutil.relativedelta.relativedelta` object.
+ """
+ # Cascade remainders down (rounding each to roughly nearest microsecond)
+ days = int(self.days)
+
+ hours_f = round(self.hours + 24 * (self.days - days), 11)
+ hours = int(hours_f)
+
+ minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
+ minutes = int(minutes_f)
+
+ seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
+ seconds = int(seconds_f)
+
+ microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
+
+ # Constructor carries overflow back up with call to _fix()
+ return self.__class__(years=self.years, months=self.months,
+ days=days, hours=hours, minutes=minutes,
+ seconds=seconds, microseconds=microseconds,
+ leapdays=self.leapdays, year=self.year,
+ month=self.month, day=self.day,
+ weekday=self.weekday, hour=self.hour,
+ minute=self.minute, second=self.second,
+ microsecond=self.microsecond)
+
+ def __add__(self, other):
+ if isinstance(other, relativedelta):
+ return self.__class__(years=other.years + self.years,
+ months=other.months + self.months,
+ days=other.days + self.days,
+ hours=other.hours + self.hours,
+ minutes=other.minutes + self.minutes,
+ seconds=other.seconds + self.seconds,
+ microseconds=(other.microseconds +
+ self.microseconds),
+ leapdays=other.leapdays or self.leapdays,
+ year=other.year or self.year,
+ month=other.month or self.month,
+ day=other.day or self.day,
+ weekday=other.weekday or self.weekday,
+ hour=other.hour or self.hour,
+ minute=other.minute or self.minute,
+ second=other.second or self.second,
+ microsecond=(other.microsecond or
+ self.microsecond))
+ if isinstance(other, datetime.timedelta):
+ return self.__class__(years=self.years,
+ months=self.months,
+ days=self.days + other.days,
+ hours=self.hours,
+ minutes=self.minutes,
+ seconds=self.seconds + other.seconds,
+ microseconds=self.microseconds + other.microseconds,
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+ if not isinstance(other, datetime.date):
+ return NotImplemented
+ elif self._has_time and not isinstance(other, datetime.datetime):
+ other = datetime.datetime.fromordinal(other.toordinal())
+ year = (self.year or other.year)+self.years
+ month = self.month or other.month
+ if self.months:
+ assert 1 <= abs(self.months) <= 12
+ month += self.months
+ if month > 12:
+ year += 1
+ month -= 12
+ elif month < 1:
+ year -= 1
+ month += 12
+ day = min(calendar.monthrange(year, month)[1],
+ self.day or other.day)
+ repl = {"year": year, "month": month, "day": day}
+ for attr in ["hour", "minute", "second", "microsecond"]:
+ value = getattr(self, attr)
+ if value is not None:
+ repl[attr] = value
+ days = self.days
+ if self.leapdays and month > 2 and calendar.isleap(year):
+ days += self.leapdays
+ ret = (other.replace(**repl)
+ + datetime.timedelta(days=days,
+ hours=self.hours,
+ minutes=self.minutes,
+ seconds=self.seconds,
+ microseconds=self.microseconds))
+ if self.weekday:
+ weekday, nth = self.weekday.weekday, self.weekday.n or 1
+ jumpdays = (abs(nth) - 1) * 7
+ if nth > 0:
+ jumpdays += (7 - ret.weekday() + weekday) % 7
+ else:
+ jumpdays += (ret.weekday() - weekday) % 7
+ jumpdays *= -1
+ ret += datetime.timedelta(days=jumpdays)
+ return ret
+
+ def __radd__(self, other):
+ return self.__add__(other)
+
+ def __rsub__(self, other):
+ return self.__neg__().__radd__(other)
+
+ def __sub__(self, other):
+ if not isinstance(other, relativedelta):
+ return NotImplemented # In case the other object defines __rsub__
+ return self.__class__(years=self.years - other.years,
+ months=self.months - other.months,
+ days=self.days - other.days,
+ hours=self.hours - other.hours,
+ minutes=self.minutes - other.minutes,
+ seconds=self.seconds - other.seconds,
+ microseconds=self.microseconds - other.microseconds,
+ leapdays=self.leapdays or other.leapdays,
+ year=self.year or other.year,
+ month=self.month or other.month,
+ day=self.day or other.day,
+ weekday=self.weekday or other.weekday,
+ hour=self.hour or other.hour,
+ minute=self.minute or other.minute,
+ second=self.second or other.second,
+ microsecond=self.microsecond or other.microsecond)
+
+ def __neg__(self):
+ return self.__class__(years=-self.years,
+ months=-self.months,
+ days=-self.days,
+ hours=-self.hours,
+ minutes=-self.minutes,
+ seconds=-self.seconds,
+ microseconds=-self.microseconds,
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+
+ def __bool__(self):
+ return not (not self.years and
+ not self.months and
+ not self.days and
+ not self.hours and
+ not self.minutes and
+ not self.seconds and
+ not self.microseconds and
+ not self.leapdays and
+ self.year is None and
+ self.month is None and
+ self.day is None and
+ self.weekday is None and
+ self.hour is None and
+ self.minute is None and
+ self.second is None and
+ self.microsecond is None)
+ # Compatibility with Python 2.x
+ __nonzero__ = __bool__
+
+ def __mul__(self, other):
+ try:
+ f = float(other)
+ except TypeError:
+ return NotImplemented
+
+ return self.__class__(years=int(self.years * f),
+ months=int(self.months * f),
+ days=int(self.days * f),
+ hours=int(self.hours * f),
+ minutes=int(self.minutes * f),
+ seconds=int(self.seconds * f),
+ microseconds=int(self.microseconds * f),
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+
+ __rmul__ = __mul__
+
+ def __eq__(self, other):
+ if not isinstance(other, relativedelta):
+ return NotImplemented
+ if self.weekday or other.weekday:
+ if not self.weekday or not other.weekday:
+ return False
+ if self.weekday.weekday != other.weekday.weekday:
+ return False
+ n1, n2 = self.weekday.n, other.weekday.n
+ if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
+ return False
+ return (self.years == other.years and
+ self.months == other.months and
+ self.days == other.days and
+ self.hours == other.hours and
+ self.minutes == other.minutes and
+ self.seconds == other.seconds and
+ self.microseconds == other.microseconds and
+ self.leapdays == other.leapdays and
+ self.year == other.year and
+ self.month == other.month and
+ self.day == other.day and
+ self.hour == other.hour and
+ self.minute == other.minute and
+ self.second == other.second and
+ self.microsecond == other.microsecond)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __div__(self, other):
+ try:
+ reciprocal = 1 / float(other)
+ except TypeError:
+ return NotImplemented
+
+ return self.__mul__(reciprocal)
+
+ __truediv__ = __div__
+
+ def __repr__(self):
+ l = []
+ for attr in ["years", "months", "days", "leapdays",
+ "hours", "minutes", "seconds", "microseconds"]:
+ value = getattr(self, attr)
+ if value:
+ l.append("{attr}={value:+g}".format(attr=attr, value=value))
+ for attr in ["year", "month", "day", "weekday",
+ "hour", "minute", "second", "microsecond"]:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("{attr}={value}".format(attr=attr, value=repr(value)))
+ return "{classname}({attrs})".format(classname=self.__class__.__name__,
+ attrs=", ".join(l))
+
+def _sign(x):
+ return int(copysign(1, x))
+
+# vim:ts=4:sw=4:et
diff --git a/app/lib/dateutil/rrule.py b/app/lib/dateutil/rrule.py
new file mode 100644
index 0000000..da94351
--- /dev/null
+++ b/app/lib/dateutil/rrule.py
@@ -0,0 +1,1607 @@
+# -*- coding: utf-8 -*-
+"""
+The rrule module offers a small, complete, and very fast, implementation of
+the recurrence rules documented in the
+`iCalendar RFC <https://tools.ietf.org/html/rfc2445>`_,
+including support for caching of results.
+"""
+import itertools
+import datetime
+import calendar
+import sys
+
+try:
+ from math import gcd
+except ImportError:
+ from fractions import gcd
+
+from six import advance_iterator, integer_types
+from six.moves import _thread, range
+import heapq
+
+from ._common import weekday as weekdaybase
+
+# For warning about deprecation of until and count
+from warnings import warn
+
+__all__ = ["rrule", "rruleset", "rrulestr",
+ "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
+ "HOURLY", "MINUTELY", "SECONDLY",
+ "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+# Every mask is 7 days longer to handle cross-year weekly periods.
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
+ [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
+M365MASK = list(M366MASK)
+M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
+MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+MDAY365MASK = list(MDAY366MASK)
+M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
+NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+NMDAY365MASK = list(NMDAY366MASK)
+M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
+M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
+WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
+del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
+MDAY365MASK = tuple(MDAY365MASK)
+M365MASK = tuple(M365MASK)
+
+FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
+             'HOURLY', 'MINUTELY', 'SECONDLY']
+
+(YEARLY,
+ MONTHLY,
+ WEEKLY,
+ DAILY,
+ HOURLY,
+ MINUTELY,
+ SECONDLY) = list(range(7))
+
+# Imported on demand.
+easter = None
+parser = None
+
+class weekday(weekdaybase):
+ """
+ This version of weekday does not allow n = 0.
+ """
+ def __init__(self, wkday, n=None):
+ if n == 0:
+ raise ValueError("Can't create weekday with n==0")
+
+ super(weekday, self).__init__(wkday, n)
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
+
+
+def _invalidates_cache(f):
+ """
+ Decorator for rruleset methods which may invalidate the
+ cached length.
+ """
+ def inner_func(self, *args, **kwargs):
+ rv = f(self, *args, **kwargs)
+ self._invalidate_cache()
+ return rv
+
+ return inner_func
+
+
+class rrulebase(object):
+ def __init__(self, cache=False):
+ if cache:
+ self._cache = []
+ self._cache_lock = _thread.allocate_lock()
+ self._invalidate_cache()
+ else:
+ self._cache = None
+ self._cache_complete = False
+ self._len = None
+
+ def __iter__(self):
+ if self._cache_complete:
+ return iter(self._cache)
+ elif self._cache is None:
+ return self._iter()
+ else:
+ return self._iter_cached()
+
+ def _invalidate_cache(self):
+ if self._cache is not None:
+ self._cache = []
+ self._cache_complete = False
+ self._cache_gen = self._iter()
+
+ if self._cache_lock.locked():
+ self._cache_lock.release()
+
+ self._len = None
+
+ def _iter_cached(self):
+ i = 0
+ gen = self._cache_gen
+ cache = self._cache
+ acquire = self._cache_lock.acquire
+ release = self._cache_lock.release
+ while gen:
+ if i == len(cache):
+ acquire()
+ if self._cache_complete:
+ break
+ try:
+ for j in range(10):
+ cache.append(advance_iterator(gen))
+ except StopIteration:
+ self._cache_gen = gen = None
+ self._cache_complete = True
+ break
+ release()
+ yield cache[i]
+ i += 1
+ while i < self._len:
+ yield cache[i]
+ i += 1
+
+ def __getitem__(self, item):
+ if self._cache_complete:
+ return self._cache[item]
+ elif isinstance(item, slice):
+ if item.step and item.step < 0:
+ return list(iter(self))[item]
+ else:
+ return list(itertools.islice(self,
+ item.start or 0,
+ item.stop or sys.maxsize,
+ item.step or 1))
+ elif item >= 0:
+ gen = iter(self)
+ try:
+ for i in range(item+1):
+ res = advance_iterator(gen)
+ except StopIteration:
+ raise IndexError
+ return res
+ else:
+ return list(iter(self))[item]
+
+ def __contains__(self, item):
+ if self._cache_complete:
+ return item in self._cache
+ else:
+ for i in self:
+ if i == item:
+ return True
+ elif i > item:
+ return False
+ return False
+
+    # __len__() introduces a large performance penalty.
+ def count(self):
+        """ Returns the number of recurrences in this set. It will have to
+        go through the whole recurrence, if this hasn't been done before. """
+ if self._len is None:
+ for x in self:
+ pass
+ return self._len
+
+ def before(self, dt, inc=False):
+ """ Returns the last recurrence before the given datetime instance. The
+ inc keyword defines what happens if dt is an occurrence. With
+ inc=True, if dt itself is an occurrence, it will be returned. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ last = None
+ if inc:
+ for i in gen:
+ if i > dt:
+ break
+ last = i
+ else:
+ for i in gen:
+ if i >= dt:
+ break
+ last = i
+ return last
+
+ def after(self, dt, inc=False):
+ """ Returns the first recurrence after the given datetime instance. The
+ inc keyword defines what happens if dt is an occurrence. With
+ inc=True, if dt itself is an occurrence, it will be returned. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ if inc:
+ for i in gen:
+ if i >= dt:
+ return i
+ else:
+ for i in gen:
+ if i > dt:
+ return i
+ return None
+
+ def xafter(self, dt, count=None, inc=False):
+ """
+ Generator which yields up to `count` recurrences after the given
+ datetime instance, equivalent to `after`.
+
+ :param dt:
+ The datetime at which to start generating recurrences.
+
+ :param count:
+ The maximum number of recurrences to generate. If `None` (default),
+ dates are generated until the recurrence rule is exhausted.
+
+ :param inc:
+ If `dt` is an instance of the rule and `inc` is `True`, it is
+ included in the output.
+
+ :yields: Yields a sequence of `datetime` objects.
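+
+        A minimal sketch with a plain daily rule:
+
+        >>> from dateutil.rrule import rrule, DAILY
+        >>> from datetime import datetime
+        >>> rule = rrule(DAILY, dtstart=datetime(2017, 1, 1))
+        >>> list(rule.xafter(datetime(2017, 1, 1), count=2, inc=True))
+        ... # doctest: +NORMALIZE_WHITESPACE
+        [datetime.datetime(2017, 1, 1, 0, 0),
+         datetime.datetime(2017, 1, 2, 0, 0)]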
+ """
+
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+
+ # Select the comparison function
+ if inc:
+ comp = lambda dc, dtc: dc >= dtc
+ else:
+ comp = lambda dc, dtc: dc > dtc
+
+ # Generate dates
+ n = 0
+ for d in gen:
+ if comp(d, dt):
+ yield d
+
+ if count is not None:
+ n += 1
+ if n >= count:
+ break
+
+ def between(self, after, before, inc=False, count=1):
+ """ Returns all the occurrences of the rrule between after and before.
+ The inc keyword defines what happens if after and/or before are
+ themselves occurrences. With inc=True, they will be included in the
+        list, if they are found in the recurrence set.
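+
+        For example (a sketch with a daily rule):
+
+        >>> from dateutil.rrule import rrule, DAILY
+        >>> from datetime import datetime
+        >>> rule = rrule(DAILY, dtstart=datetime(2017, 1, 1))
+        >>> rule.between(datetime(2017, 1, 1), datetime(2017, 1, 3), inc=True)
+        ... # doctest: +NORMALIZE_WHITESPACE
+        [datetime.datetime(2017, 1, 1, 0, 0),
+         datetime.datetime(2017, 1, 2, 0, 0),
+         datetime.datetime(2017, 1, 3, 0, 0)]
+        """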
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ started = False
+ l = []
+ if inc:
+ for i in gen:
+ if i > before:
+ break
+ elif not started:
+ if i >= after:
+ started = True
+ l.append(i)
+ else:
+ l.append(i)
+ else:
+ for i in gen:
+ if i >= before:
+ break
+ elif not started:
+ if i > after:
+ started = True
+ l.append(i)
+ else:
+ l.append(i)
+ return l
+
+
+class rrule(rrulebase):
+ """
+    This is the base of the rrule operation. It accepts all the keywords
+ defined in the RFC as its constructor parameters (except byday,
+ which was renamed to byweekday) and more. The constructor prototype is::
+
+ rrule(freq)
+
+ Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
+ or SECONDLY.
+
+ .. note::
+ Per RFC section 3.3.10, recurrence instances falling on invalid dates
+ and times are ignored rather than coerced:
+
+ Recurrence rules may generate recurrence instances with an invalid
+ date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
+ on a day where the local time is moved forward by an hour at 1:00
+ AM). Such recurrence instances MUST be ignored and MUST NOT be
+ counted as part of the recurrence set.
+
+ This can lead to possibly surprising behavior when, for example, the
+ start date occurs at the end of the month:
+
+ >>> from dateutil.rrule import rrule, MONTHLY
+ >>> from datetime import datetime
+ >>> start_date = datetime(2014, 12, 31)
+ >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
+ ... # doctest: +NORMALIZE_WHITESPACE
+ [datetime.datetime(2014, 12, 31, 0, 0),
+ datetime.datetime(2015, 1, 31, 0, 0),
+ datetime.datetime(2015, 3, 31, 0, 0),
+ datetime.datetime(2015, 5, 31, 0, 0)]
+
+ Additionally, it supports the following keyword arguments:
+
+ :param cache:
+ If given, it must be a boolean value specifying to enable or disable
+ caching of results. If you will use the same rrule instance multiple
+ times, enabling caching will improve the performance considerably.
+ :param dtstart:
+ The recurrence start. Besides being the base for the recurrence,
+ missing parameters in the final recurrence instances will also be
+ extracted from this date. If not given, datetime.now() will be used
+ instead.
+ :param interval:
+ The interval between each freq iteration. For example, when using
+ YEARLY, an interval of 2 means once every two years, but with HOURLY,
+ it means once every two hours. The default interval is 1.
+ :param wkst:
+ The week start day. Must be one of the MO, TU, WE constants, or an
+ integer, specifying the first day of the week. This will affect
+        recurrences based on weekly periods. The default week start is
+        retrieved from calendar.firstweekday(), and may be modified by
+        calendar.setfirstweekday().
+ :param count:
+ How many occurrences will be generated.
+
+ .. note::
+ As of version 2.5.0, the use of the ``until`` keyword together
+ with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
+ :param until:
+ If given, this must be a datetime instance, that will specify the
+ limit of the recurrence. The last recurrence in the rule is the greatest
+ datetime that is less than or equal to the value specified in the
+ ``until`` parameter.
+
+ .. note::
+ As of version 2.5.0, the use of the ``until`` keyword together
+ with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
+ :param bysetpos:
+ If given, it must be either an integer, or a sequence of integers,
+ positive or negative. Each given integer will specify an occurrence
+ number, corresponding to the nth occurrence of the rule inside the
+ frequency period. For example, a bysetpos of -1 if combined with a
+ MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
+ result in the last work day of every month.
+ :param bymonth:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the months to apply the recurrence to.
+ :param bymonthday:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the month days to apply the recurrence to.
+ :param byyearday:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the year days to apply the recurrence to.
+ :param byweekno:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the week numbers to apply the recurrence to. Week numbers
+ have the meaning described in ISO8601, that is, the first week of
+ the year is that containing at least four days of the new year.
+ :param byweekday:
+ If given, it must be either an integer (0 == MO), a sequence of
+ integers, one of the weekday constants (MO, TU, etc), or a sequence
+ of these constants. When given, these variables will define the
+ weekdays where the recurrence will be applied. It's also possible to
+ use an argument n for the weekday instances, which will mean the nth
+ occurrence of this weekday in the period. For example, with MONTHLY,
+ or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
+        first Friday of the month where the recurrence happens. Notice that in
+ the RFC documentation, this is specified as BYDAY, but was renamed to
+ avoid the ambiguity of that keyword.
+ :param byhour:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the hours to apply the recurrence to.
+ :param byminute:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the minutes to apply the recurrence to.
+ :param bysecond:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the seconds to apply the recurrence to.
+ :param byeaster:
+ If given, it must be either an integer, or a sequence of integers,
+ positive or negative. Each integer will define an offset from the
+ Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
+ Sunday itself. This is an extension to the RFC specification.
+ """
+ def __init__(self, freq, dtstart=None,
+ interval=1, wkst=None, count=None, until=None, bysetpos=None,
+ bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
+ byweekno=None, byweekday=None,
+ byhour=None, byminute=None, bysecond=None,
+ cache=False):
+ super(rrule, self).__init__(cache)
+ global easter
+ if not dtstart:
+ dtstart = datetime.datetime.now().replace(microsecond=0)
+ elif not isinstance(dtstart, datetime.datetime):
+ dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
+ else:
+ dtstart = dtstart.replace(microsecond=0)
+ self._dtstart = dtstart
+ self._tzinfo = dtstart.tzinfo
+ self._freq = freq
+ self._interval = interval
+ self._count = count
+
+ # Cache the original byxxx rules, if they are provided, as the _byxxx
+ # attributes do not necessarily map to the inputs, and this can be
+ # a problem in generating the strings. Only store things if they've
+ # been supplied (the string retrieval will just use .get())
+ self._original_rule = {}
+
+ if until and not isinstance(until, datetime.datetime):
+ until = datetime.datetime.fromordinal(until.toordinal())
+ self._until = until
+
+ if count and until:
+ warn("Using both 'count' and 'until' is inconsistent with RFC 2445"
+ " and has been deprecated in dateutil. Future versions will "
+ "raise an error.", DeprecationWarning)
+
+ if wkst is None:
+ self._wkst = calendar.firstweekday()
+ elif isinstance(wkst, integer_types):
+ self._wkst = wkst
+ else:
+ self._wkst = wkst.weekday
+
+ if bysetpos is None:
+ self._bysetpos = None
+ elif isinstance(bysetpos, integer_types):
+ if bysetpos == 0 or not (-366 <= bysetpos <= 366):
+ raise ValueError("bysetpos must be between 1 and 366, "
+ "or between -366 and -1")
+ self._bysetpos = (bysetpos,)
+ else:
+ self._bysetpos = tuple(bysetpos)
+ for pos in self._bysetpos:
+ if pos == 0 or not (-366 <= pos <= 366):
+ raise ValueError("bysetpos must be between 1 and 366, "
+ "or between -366 and -1")
+
+ if self._bysetpos:
+ self._original_rule['bysetpos'] = self._bysetpos
+
+ if (byweekno is None and byyearday is None and bymonthday is None and
+ byweekday is None and byeaster is None):
+ if freq == YEARLY:
+ if bymonth is None:
+ bymonth = dtstart.month
+ self._original_rule['bymonth'] = None
+ bymonthday = dtstart.day
+ self._original_rule['bymonthday'] = None
+ elif freq == MONTHLY:
+ bymonthday = dtstart.day
+ self._original_rule['bymonthday'] = None
+ elif freq == WEEKLY:
+ byweekday = dtstart.weekday()
+ self._original_rule['byweekday'] = None
+
+ # bymonth
+ if bymonth is None:
+ self._bymonth = None
+ else:
+ if isinstance(bymonth, integer_types):
+ bymonth = (bymonth,)
+
+ self._bymonth = tuple(sorted(set(bymonth)))
+
+ if 'bymonth' not in self._original_rule:
+ self._original_rule['bymonth'] = self._bymonth
+
+ # byyearday
+ if byyearday is None:
+ self._byyearday = None
+ else:
+ if isinstance(byyearday, integer_types):
+ byyearday = (byyearday,)
+
+ self._byyearday = tuple(sorted(set(byyearday)))
+ self._original_rule['byyearday'] = self._byyearday
+
+ # byeaster
+ if byeaster is not None:
+ if not easter:
+ from dateutil import easter
+ if isinstance(byeaster, integer_types):
+ self._byeaster = (byeaster,)
+ else:
+ self._byeaster = tuple(sorted(byeaster))
+
+ self._original_rule['byeaster'] = self._byeaster
+ else:
+ self._byeaster = None
+
+ # bymonthday
+ if bymonthday is None:
+ self._bymonthday = ()
+ self._bynmonthday = ()
+ else:
+ if isinstance(bymonthday, integer_types):
+ bymonthday = (bymonthday,)
+
+ bymonthday = set(bymonthday) # Ensure it's unique
+
+ self._bymonthday = tuple(sorted([x for x in bymonthday if x > 0]))
+ self._bynmonthday = tuple(sorted([x for x in bymonthday if x < 0]))
+
+ # Storing positive numbers first, then negative numbers
+ if 'bymonthday' not in self._original_rule:
+ self._original_rule['bymonthday'] = tuple(
+ itertools.chain(self._bymonthday, self._bynmonthday))
+
+ # byweekno
+ if byweekno is None:
+ self._byweekno = None
+ else:
+ if isinstance(byweekno, integer_types):
+ byweekno = (byweekno,)
+
+ self._byweekno = tuple(sorted(set(byweekno)))
+
+ self._original_rule['byweekno'] = self._byweekno
+
+ # byweekday / bynweekday
+ if byweekday is None:
+ self._byweekday = None
+ self._bynweekday = None
+ else:
+ # If it's one of the valid non-sequence types, convert to a
+ # single-element sequence before the iterator that builds the
+ # byweekday set.
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
+ byweekday = (byweekday,)
+
+ self._byweekday = set()
+ self._bynweekday = set()
+ for wday in byweekday:
+ if isinstance(wday, integer_types):
+ self._byweekday.add(wday)
+ elif not wday.n or freq > MONTHLY:
+ self._byweekday.add(wday.weekday)
+ else:
+ self._bynweekday.add((wday.weekday, wday.n))
+
+ if not self._byweekday:
+ self._byweekday = None
+ elif not self._bynweekday:
+ self._bynweekday = None
+
+ if self._byweekday is not None:
+ self._byweekday = tuple(sorted(self._byweekday))
+ orig_byweekday = [weekday(x) for x in self._byweekday]
+ else:
+ orig_byweekday = tuple()
+
+ if self._bynweekday is not None:
+ self._bynweekday = tuple(sorted(self._bynweekday))
+ orig_bynweekday = [weekday(*x) for x in self._bynweekday]
+ else:
+ orig_bynweekday = tuple()
+
+ if 'byweekday' not in self._original_rule:
+ self._original_rule['byweekday'] = tuple(itertools.chain(
+ orig_byweekday, orig_bynweekday))
+
+ # byhour
+ if byhour is None:
+ if freq < HOURLY:
+ self._byhour = set((dtstart.hour,))
+ else:
+ self._byhour = None
+ else:
+ if isinstance(byhour, integer_types):
+ byhour = (byhour,)
+
+ if freq == HOURLY:
+ self._byhour = self.__construct_byset(start=dtstart.hour,
+ byxxx=byhour,
+ base=24)
+ else:
+ self._byhour = set(byhour)
+
+ self._byhour = tuple(sorted(self._byhour))
+ self._original_rule['byhour'] = self._byhour
+
+ # byminute
+ if byminute is None:
+ if freq < MINUTELY:
+ self._byminute = set((dtstart.minute,))
+ else:
+ self._byminute = None
+ else:
+ if isinstance(byminute, integer_types):
+ byminute = (byminute,)
+
+ if freq == MINUTELY:
+ self._byminute = self.__construct_byset(start=dtstart.minute,
+ byxxx=byminute,
+ base=60)
+ else:
+ self._byminute = set(byminute)
+
+ self._byminute = tuple(sorted(self._byminute))
+ self._original_rule['byminute'] = self._byminute
+
+ # bysecond
+ if bysecond is None:
+ if freq < SECONDLY:
+                self._bysecond = (dtstart.second,)
+ else:
+ self._bysecond = None
+ else:
+ if isinstance(bysecond, integer_types):
+ bysecond = (bysecond,)
+
+ if freq == SECONDLY:
+ self._bysecond = self.__construct_byset(start=dtstart.second,
+ byxxx=bysecond,
+ base=60)
+ else:
+ self._bysecond = set(bysecond)
+
+ self._bysecond = tuple(sorted(self._bysecond))
+ self._original_rule['bysecond'] = self._bysecond
+
+ if self._freq >= HOURLY:
+ self._timeset = None
+ else:
+ self._timeset = []
+ for hour in self._byhour:
+ for minute in self._byminute:
+ for second in self._bysecond:
+ self._timeset.append(
+ datetime.time(hour, minute, second,
+ tzinfo=self._tzinfo))
+ self._timeset.sort()
+ self._timeset = tuple(self._timeset)
+
+ def __str__(self):
+ """
+ Output a string that would generate this RRULE if passed to rrulestr.
+ This is mostly compatible with RFC2445, except for the
+ dateutil-specific extension BYEASTER.
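+
+        A small sketch of the output format:
+
+        >>> from dateutil.rrule import rrule, DAILY
+        >>> from datetime import datetime
+        >>> print(rrule(DAILY, count=3, dtstart=datetime(2017, 1, 1)))
+        DTSTART:20170101T000000
+        FREQ=DAILY;COUNT=3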
+ """
+
+ output = []
+ h, m, s = [None] * 3
+ if self._dtstart:
+ output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
+ h, m, s = self._dtstart.timetuple()[3:6]
+
+ parts = ['FREQ=' + FREQNAMES[self._freq]]
+ if self._interval != 1:
+ parts.append('INTERVAL=' + str(self._interval))
+
+ if self._wkst:
+ parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
+
+ if self._count:
+ parts.append('COUNT=' + str(self._count))
+
+ if self._until:
+ parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
+
+ if self._original_rule.get('byweekday') is not None:
+ # The str() method on weekday objects doesn't generate
+ # RFC2445-compliant strings, so we should modify that.
+ original_rule = dict(self._original_rule)
+ wday_strings = []
+ for wday in original_rule['byweekday']:
+ if wday.n:
+ wday_strings.append('{n:+d}{wday}'.format(
+ n=wday.n,
+ wday=repr(wday)[0:2]))
+ else:
+ wday_strings.append(repr(wday))
+
+ original_rule['byweekday'] = wday_strings
+ else:
+ original_rule = self._original_rule
+
+ partfmt = '{name}={vals}'
+ for name, key in [('BYSETPOS', 'bysetpos'),
+ ('BYMONTH', 'bymonth'),
+ ('BYMONTHDAY', 'bymonthday'),
+ ('BYYEARDAY', 'byyearday'),
+ ('BYWEEKNO', 'byweekno'),
+ ('BYDAY', 'byweekday'),
+ ('BYHOUR', 'byhour'),
+ ('BYMINUTE', 'byminute'),
+ ('BYSECOND', 'bysecond'),
+ ('BYEASTER', 'byeaster')]:
+ value = original_rule.get(key)
+ if value:
+ parts.append(partfmt.format(name=name, vals=(','.join(str(v)
+ for v in value))))
+
+ output.append(';'.join(parts))
+ return '\n'.join(output)
+
+ def replace(self, **kwargs):
+ """Return new rrule with same attributes except for those attributes given new
+        values by whichever keyword arguments are specified.
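+
+        A small sketch (count is swapped, everything else is kept):
+
+        >>> from dateutil.rrule import rrule, DAILY
+        >>> from datetime import datetime
+        >>> rule = rrule(DAILY, count=2, dtstart=datetime(2017, 1, 1))
+        >>> print(rule.replace(count=5))
+        DTSTART:20170101T000000
+        FREQ=DAILY;COUNT=5
+        """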
+ new_kwargs = {"interval": self._interval,
+ "count": self._count,
+ "dtstart": self._dtstart,
+ "freq": self._freq,
+ "until": self._until,
+ "wkst": self._wkst,
+                      "cache": self._cache is not None}
+ new_kwargs.update(self._original_rule)
+ new_kwargs.update(kwargs)
+ return rrule(**new_kwargs)
+
+ def _iter(self):
+ year, month, day, hour, minute, second, weekday, yearday, _ = \
+ self._dtstart.timetuple()
+
+ # Some local variables to speed things up a bit
+ freq = self._freq
+ interval = self._interval
+ wkst = self._wkst
+ until = self._until
+ bymonth = self._bymonth
+ byweekno = self._byweekno
+ byyearday = self._byyearday
+ byweekday = self._byweekday
+ byeaster = self._byeaster
+ bymonthday = self._bymonthday
+ bynmonthday = self._bynmonthday
+ bysetpos = self._bysetpos
+ byhour = self._byhour
+ byminute = self._byminute
+ bysecond = self._bysecond
+
+ ii = _iterinfo(self)
+ ii.rebuild(year, month)
+
+ getdayset = {YEARLY: ii.ydayset,
+ MONTHLY: ii.mdayset,
+ WEEKLY: ii.wdayset,
+ DAILY: ii.ddayset,
+ HOURLY: ii.ddayset,
+ MINUTELY: ii.ddayset,
+ SECONDLY: ii.ddayset}[freq]
+
+ if freq < HOURLY:
+ timeset = self._timeset
+ else:
+ gettimeset = {HOURLY: ii.htimeset,
+ MINUTELY: ii.mtimeset,
+ SECONDLY: ii.stimeset}[freq]
+ if ((freq >= HOURLY and
+ self._byhour and hour not in self._byhour) or
+ (freq >= MINUTELY and
+ self._byminute and minute not in self._byminute) or
+ (freq >= SECONDLY and
+ self._bysecond and second not in self._bysecond)):
+ timeset = ()
+ else:
+ timeset = gettimeset(hour, minute, second)
+
+ total = 0
+ count = self._count
+ while True:
+ # Get dayset with the right frequency
+ dayset, start, end = getdayset(year, month, day)
+
+ # Do the "hard" work ;-)
+ filtered = False
+ for i in dayset[start:end]:
+ if ((bymonth and ii.mmask[i] not in bymonth) or
+ (byweekno and not ii.wnomask[i]) or
+ (byweekday and ii.wdaymask[i] not in byweekday) or
+ (ii.nwdaymask and not ii.nwdaymask[i]) or
+ (byeaster and not ii.eastermask[i]) or
+ ((bymonthday or bynmonthday) and
+ ii.mdaymask[i] not in bymonthday and
+ ii.nmdaymask[i] not in bynmonthday) or
+ (byyearday and
+ ((i < ii.yearlen and i+1 not in byyearday and
+ -ii.yearlen+i not in byyearday) or
+ (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
+ -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
+ dayset[i] = None
+ filtered = True
+
+ # Output results
+ if bysetpos and timeset:
+ poslist = []
+ for pos in bysetpos:
+ if pos < 0:
+ daypos, timepos = divmod(pos, len(timeset))
+ else:
+ daypos, timepos = divmod(pos-1, len(timeset))
+ try:
+ i = [x for x in dayset[start:end]
+ if x is not None][daypos]
+ time = timeset[timepos]
+ except IndexError:
+ pass
+ else:
+ date = datetime.date.fromordinal(ii.yearordinal+i)
+ res = datetime.datetime.combine(date, time)
+ if res not in poslist:
+ poslist.append(res)
+ poslist.sort()
+ for res in poslist:
+ if until and res > until:
+ self._len = total
+ return
+ elif res >= self._dtstart:
+ total += 1
+ yield res
+ if count:
+ count -= 1
+ if not count:
+ self._len = total
+ return
+ else:
+ for i in dayset[start:end]:
+ if i is not None:
+ date = datetime.date.fromordinal(ii.yearordinal + i)
+ for time in timeset:
+ res = datetime.datetime.combine(date, time)
+ if until and res > until:
+ self._len = total
+ return
+ elif res >= self._dtstart:
+ total += 1
+ yield res
+ if count:
+ count -= 1
+ if not count:
+ self._len = total
+ return
+
+ # Handle frequency and interval
+ fixday = False
+ if freq == YEARLY:
+ year += interval
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ ii.rebuild(year, month)
+ elif freq == MONTHLY:
+ month += interval
+ if month > 12:
+ div, mod = divmod(month, 12)
+ month = mod
+ year += div
+ if month == 0:
+ month = 12
+ year -= 1
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ ii.rebuild(year, month)
+ elif freq == WEEKLY:
+ if wkst > weekday:
+ day += -(weekday+1+(6-wkst))+self._interval*7
+ else:
+ day += -(weekday-wkst)+self._interval*7
+ weekday = wkst
+ fixday = True
+ elif freq == DAILY:
+ day += interval
+ fixday = True
+ elif freq == HOURLY:
+ if filtered:
+ # Jump to one iteration before next day
+ hour += ((23-hour)//interval)*interval
+
+ if byhour:
+ ndays, hour = self.__mod_distance(value=hour,
+ byxxx=self._byhour,
+ base=24)
+ else:
+ ndays, hour = divmod(hour+interval, 24)
+
+ if ndays:
+ day += ndays
+ fixday = True
+
+ timeset = gettimeset(hour, minute, second)
+ elif freq == MINUTELY:
+ if filtered:
+ # Jump to one iteration before next day
+ minute += ((1439-(hour*60+minute))//interval)*interval
+
+ valid = False
+ rep_rate = (24*60)
+ for j in range(rep_rate // gcd(interval, rep_rate)):
+ if byminute:
+ nhours, minute = \
+ self.__mod_distance(value=minute,
+ byxxx=self._byminute,
+ base=60)
+ else:
+ nhours, minute = divmod(minute+interval, 60)
+
+ div, hour = divmod(hour+nhours, 24)
+ if div:
+ day += div
+ fixday = True
+ filtered = False
+
+ if not byhour or hour in byhour:
+ valid = True
+ break
+
+ if not valid:
+ raise ValueError('Invalid combination of interval and ' +
+ 'byhour resulting in empty rule.')
+
+ timeset = gettimeset(hour, minute, second)
+ elif freq == SECONDLY:
+ if filtered:
+ # Jump to one iteration before next day
+ second += (((86399 - (hour * 3600 + minute * 60 + second))
+ // interval) * interval)
+
+ rep_rate = (24 * 3600)
+ valid = False
+ for j in range(0, rep_rate // gcd(interval, rep_rate)):
+ if bysecond:
+ nminutes, second = \
+ self.__mod_distance(value=second,
+ byxxx=self._bysecond,
+ base=60)
+ else:
+ nminutes, second = divmod(second+interval, 60)
+
+ div, minute = divmod(minute+nminutes, 60)
+ if div:
+ hour += div
+ div, hour = divmod(hour, 24)
+ if div:
+ day += div
+ fixday = True
+
+ if ((not byhour or hour in byhour) and
+ (not byminute or minute in byminute) and
+ (not bysecond or second in bysecond)):
+ valid = True
+ break
+
+ if not valid:
+ raise ValueError('Invalid combination of interval, ' +
+ 'byhour and byminute resulting in empty' +
+ ' rule.')
+
+ timeset = gettimeset(hour, minute, second)
+
+ if fixday and day > 28:
+ daysinmonth = calendar.monthrange(year, month)[1]
+ if day > daysinmonth:
+ while day > daysinmonth:
+ day -= daysinmonth
+ month += 1
+ if month == 13:
+ month = 1
+ year += 1
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ daysinmonth = calendar.monthrange(year, month)[1]
+ ii.rebuild(year, month)
+
+ def __construct_byset(self, start, byxxx, base):
+ """
+ If a `BYXXX` sequence is passed to the constructor at the same level as
+ `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
+ specifications which cannot be reached given some starting conditions.
+
+        This occurs whenever the interval is not coprime with the base of a
+        given unit and the difference between the starting position and the
+        ending position is not divisible by the greatest common divisor of
+        the interval and the base. For example, with a FREQ of hourly
+ starting at 17:00 and an interval of 4, the only valid values for
+ BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
+ coprime.
+
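+        A minimal sketch of the failure mode through the public
+        constructor (hour 2 is unreachable from an hour-17 start with
+        an interval of 4):
+
+        >>> from dateutil.rrule import rrule, HOURLY
+        >>> from datetime import datetime
+        >>> rrule(HOURLY, interval=4, byhour=2,
+        ...       dtstart=datetime(2017, 1, 1, 17))
+        Traceback (most recent call last):
+          ...
+        ValueError: Invalid rrule byxxx generates an empty set.
+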
+ :param start:
+ Specifies the starting position.
+ :param byxxx:
+ An iterable containing the list of allowed values.
+ :param base:
+ The largest allowable value for the specified frequency (e.g.
+ 24 hours, 60 minutes).
+
+        This does not preserve the type of the iterable, returning a set
+        instead, since the values should be unique and the order is
+        irrelevant; this also speeds up later lookups.
+
+ In the event of an empty set, raises a :exception:`ValueError`, as this
+ results in an empty rrule.
+ """
+
+ cset = set()
+
+ # Support a single byxxx value.
+ if isinstance(byxxx, integer_types):
+ byxxx = (byxxx, )
+
+ for num in byxxx:
+ i_gcd = gcd(self._interval, base)
+ # Use divmod rather than % because we need to wrap negative nums.
+ if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
+ cset.add(num)
+
+ if len(cset) == 0:
+ raise ValueError("Invalid rrule byxxx generates an empty set.")
+
+ return cset
+
+ def __mod_distance(self, value, byxxx, base):
+ """
+ Calculates the next value in a sequence where the `FREQ` parameter is
+ specified along with a `BYXXX` parameter at the same "level"
+ (e.g. `HOURLY` specified with `BYHOUR`).
+
+ :param value:
+ The old value of the component.
+ :param byxxx:
+ The `BYXXX` set, which should have been generated by
+ `rrule._construct_byset`, or something else which checks that a
+ valid rule is present.
+ :param base:
+ The largest allowable value for the specified frequency (e.g.
+ 24 hours, 60 minutes).
+
+        A valid value is always found within `base` iterations (the maximum
+        number before the sequence would start to repeat), provided `byxxx`
+        was validated by `__construct_byset`; if it was not, the loop simply
+        terminates and `None` is returned implicitly.
+
+ This returns a tuple of `divmod(n*interval, base)`, where `n` is the
+ smallest number of `interval` repetitions until the next specified
+ value in `byxxx` is found.
+        value in `byxxx` is found. For example, with ``interval=2``,
+        ``base=60`` and ``byxxx={2}``, a ``value`` of 58 yields ``(1, 2)``:
+        one unit of the next-larger component is carried, and the value
+        lands on 2.
+        """
+ accumulator = 0
+ for ii in range(1, base + 1):
+ # Using divmod() over % to account for negative intervals
+ div, value = divmod(value + self._interval, base)
+ accumulator += div
+ if value in byxxx:
+ return (accumulator, value)
+
+
+class _iterinfo(object):
+ __slots__ = ["rrule", "lastyear", "lastmonth",
+ "yearlen", "nextyearlen", "yearordinal", "yearweekday",
+ "mmask", "mrange", "mdaymask", "nmdaymask",
+ "wdaymask", "wnomask", "nwdaymask", "eastermask"]
+
+ def __init__(self, rrule):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+ self.rrule = rrule
+
+ def rebuild(self, year, month):
+ # Every mask is 7 days longer to handle cross-year weekly periods.
+ rr = self.rrule
+ if year != self.lastyear:
+ self.yearlen = 365 + calendar.isleap(year)
+ self.nextyearlen = 365 + calendar.isleap(year + 1)
+ firstyday = datetime.date(year, 1, 1)
+ self.yearordinal = firstyday.toordinal()
+ self.yearweekday = firstyday.weekday()
+
+ wday = datetime.date(year, 1, 1).weekday()
+ if self.yearlen == 365:
+ self.mmask = M365MASK
+ self.mdaymask = MDAY365MASK
+ self.nmdaymask = NMDAY365MASK
+ self.wdaymask = WDAYMASK[wday:]
+ self.mrange = M365RANGE
+ else:
+ self.mmask = M366MASK
+ self.mdaymask = MDAY366MASK
+ self.nmdaymask = NMDAY366MASK
+ self.wdaymask = WDAYMASK[wday:]
+ self.mrange = M366RANGE
+
+ if not rr._byweekno:
+ self.wnomask = None
+ else:
+ self.wnomask = [0]*(self.yearlen+7)
+ # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
+ no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
+ if no1wkst >= 4:
+ no1wkst = 0
+ # Number of days in the year, plus the days we got
+ # from last year.
+ wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
+ else:
+ # Number of days in the year, minus the days we
+ # left in last year.
+ wyearlen = self.yearlen-no1wkst
+ div, mod = divmod(wyearlen, 7)
+ numweeks = div+mod//4
+ for n in rr._byweekno:
+ if n < 0:
+ n += numweeks+1
+ if not (0 < n <= numweeks):
+ continue
+ if n > 1:
+ i = no1wkst+(n-1)*7
+ if no1wkst != firstwkst:
+ i -= 7-firstwkst
+ else:
+ i = no1wkst
+ for j in range(7):
+ self.wnomask[i] = 1
+ i += 1
+ if self.wdaymask[i] == rr._wkst:
+ break
+ if 1 in rr._byweekno:
+ # Check week number 1 of next year as well
+ # TODO: Check -numweeks for next year.
+ i = no1wkst+numweeks*7
+ if no1wkst != firstwkst:
+ i -= 7-firstwkst
+ if i < self.yearlen:
+ # If week starts in next year, we
+ # don't care about it.
+ for j in range(7):
+ self.wnomask[i] = 1
+ i += 1
+ if self.wdaymask[i] == rr._wkst:
+ break
+ if no1wkst:
+ # Check last week number of last year as
+ # well. If no1wkst is 0, either the year
+ # started on week start, or week number 1
+ # got days from last year, so there are no
+ # days from last year's last week number in
+ # this year.
+ if -1 not in rr._byweekno:
+ lyearweekday = datetime.date(year-1, 1, 1).weekday()
+ lno1wkst = (7-lyearweekday+rr._wkst) % 7
+ lyearlen = 365+calendar.isleap(year-1)
+ if lno1wkst >= 4:
+ lno1wkst = 0
+ lnumweeks = 52+(lyearlen +
+ (lyearweekday-rr._wkst) % 7) % 7//4
+ else:
+ lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
+ else:
+ lnumweeks = -1
+ if lnumweeks in rr._byweekno:
+ for i in range(no1wkst):
+ self.wnomask[i] = 1
+
+ if (rr._bynweekday and (month != self.lastmonth or
+ year != self.lastyear)):
+ ranges = []
+ if rr._freq == YEARLY:
+ if rr._bymonth:
+ for month in rr._bymonth:
+ ranges.append(self.mrange[month-1:month+1])
+ else:
+ ranges = [(0, self.yearlen)]
+ elif rr._freq == MONTHLY:
+ ranges = [self.mrange[month-1:month+1]]
+ if ranges:
+ # Weekly frequency won't get here, so we may not
+ # care about cross-year weekly periods.
+ self.nwdaymask = [0]*self.yearlen
+ for first, last in ranges:
+ last -= 1
+ for wday, n in rr._bynweekday:
+ if n < 0:
+ i = last+(n+1)*7
+ i -= (self.wdaymask[i]-wday) % 7
+ else:
+ i = first+(n-1)*7
+ i += (7-self.wdaymask[i]+wday) % 7
+ if first <= i <= last:
+ self.nwdaymask[i] = 1
+
+ if rr._byeaster:
+ self.eastermask = [0]*(self.yearlen+7)
+ eyday = easter.easter(year).toordinal()-self.yearordinal
+ for offset in rr._byeaster:
+ self.eastermask[eyday+offset] = 1
+
+ self.lastyear = year
+ self.lastmonth = month
+
+ def ydayset(self, year, month, day):
+ return list(range(self.yearlen)), 0, self.yearlen
+
+ def mdayset(self, year, month, day):
+ dset = [None]*self.yearlen
+ start, end = self.mrange[month-1:month+1]
+ for i in range(start, end):
+ dset[i] = i
+ return dset, start, end
+
+ def wdayset(self, year, month, day):
+ # We need to handle cross-year weeks here.
+ dset = [None]*(self.yearlen+7)
+ i = datetime.date(year, month, day).toordinal()-self.yearordinal
+ start = i
+ for j in range(7):
+ dset[i] = i
+ i += 1
+ # if (not (0 <= i < self.yearlen) or
+ # self.wdaymask[i] == self.rrule._wkst):
+ # This will cross the year boundary, if necessary.
+ if self.wdaymask[i] == self.rrule._wkst:
+ break
+ return dset, start, i
+
+ def ddayset(self, year, month, day):
+ dset = [None] * self.yearlen
+ i = datetime.date(year, month, day).toordinal() - self.yearordinal
+ dset[i] = i
+ return dset, i, i + 1
+
+ def htimeset(self, hour, minute, second):
+ tset = []
+ rr = self.rrule
+ for minute in rr._byminute:
+ for second in rr._bysecond:
+ tset.append(datetime.time(hour, minute, second,
+ tzinfo=rr._tzinfo))
+ tset.sort()
+ return tset
+
+ def mtimeset(self, hour, minute, second):
+ tset = []
+ rr = self.rrule
+ for second in rr._bysecond:
+ tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
+ tset.sort()
+ return tset
+
+ def stimeset(self, hour, minute, second):
+ return (datetime.time(hour, minute, second,
+ tzinfo=self.rrule._tzinfo),)
+
+
+class rruleset(rrulebase):
+ """ The rruleset type allows more complex recurrence setups, mixing
+ multiple rules, dates, exclusion rules, and exclusion dates. The type
+ constructor takes the following keyword arguments:
+
+ :param cache: If True, caching of results will be enabled, improving
+                  performance of multiple queries considerably.
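+
+    A minimal sketch, mixing a weekly rule with one extra date:
+
+    >>> from dateutil.rrule import rrule, rruleset, WEEKLY
+    >>> from datetime import datetime
+    >>> rs = rruleset()
+    >>> rs.rrule(rrule(WEEKLY, count=2, dtstart=datetime(2017, 1, 2)))
+    >>> rs.rdate(datetime(2017, 1, 4))
+    >>> list(rs)
+    ... # doctest: +NORMALIZE_WHITESPACE
+    [datetime.datetime(2017, 1, 2, 0, 0),
+     datetime.datetime(2017, 1, 4, 0, 0),
+     datetime.datetime(2017, 1, 9, 0, 0)]
+    """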
+
+ class _genitem(object):
+ def __init__(self, genlist, gen):
+ try:
+ self.dt = advance_iterator(gen)
+ genlist.append(self)
+ except StopIteration:
+ pass
+ self.genlist = genlist
+ self.gen = gen
+
+ def __next__(self):
+ try:
+ self.dt = advance_iterator(self.gen)
+ except StopIteration:
+ if self.genlist[0] is self:
+ heapq.heappop(self.genlist)
+ else:
+ self.genlist.remove(self)
+ heapq.heapify(self.genlist)
+
+ next = __next__
+
+ def __lt__(self, other):
+ return self.dt < other.dt
+
+ def __gt__(self, other):
+ return self.dt > other.dt
+
+ def __eq__(self, other):
+ return self.dt == other.dt
+
+ def __ne__(self, other):
+ return self.dt != other.dt
+
+ def __init__(self, cache=False):
+ super(rruleset, self).__init__(cache)
+ self._rrule = []
+ self._rdate = []
+ self._exrule = []
+ self._exdate = []
+
+ @_invalidates_cache
+ def rrule(self, rrule):
+ """ Include the given :py:class:`rrule` instance in the recurrence set
+ generation. """
+ self._rrule.append(rrule)
+
+ @_invalidates_cache
+ def rdate(self, rdate):
+ """ Include the given :py:class:`datetime` instance in the recurrence
+ set generation. """
+ self._rdate.append(rdate)
+
+ @_invalidates_cache
+ def exrule(self, exrule):
+ """ Include the given rrule instance in the recurrence set exclusion
+ list. Dates which are part of the given recurrence rules will not
+ be generated, even if some inclusive rrule or rdate matches them.
+ """
+ self._exrule.append(exrule)
+
+ @_invalidates_cache
+ def exdate(self, exdate):
+ """ Include the given datetime instance in the recurrence set
+ exclusion list. Dates included that way will not be generated,
+ even if some inclusive rrule or rdate matches them. """
+ self._exdate.append(exdate)
+
+ def _iter(self):
+ rlist = []
+ self._rdate.sort()
+ self._genitem(rlist, iter(self._rdate))
+ for gen in [iter(x) for x in self._rrule]:
+ self._genitem(rlist, gen)
+ exlist = []
+ self._exdate.sort()
+ self._genitem(exlist, iter(self._exdate))
+ for gen in [iter(x) for x in self._exrule]:
+ self._genitem(exlist, gen)
+ lastdt = None
+ total = 0
+ heapq.heapify(rlist)
+ heapq.heapify(exlist)
+ while rlist:
+ ritem = rlist[0]
+ if not lastdt or lastdt != ritem.dt:
+ while exlist and exlist[0] < ritem:
+ exitem = exlist[0]
+ advance_iterator(exitem)
+ if exlist and exlist[0] is exitem:
+ heapq.heapreplace(exlist, exitem)
+ if not exlist or ritem != exlist[0]:
+ total += 1
+ yield ritem.dt
+ lastdt = ritem.dt
+ advance_iterator(ritem)
+ if rlist and rlist[0] is ritem:
+ heapq.heapreplace(rlist, ritem)
+ self._len = total
+
+
+class _rrulestr(object):
+
+ _freq_map = {"YEARLY": YEARLY,
+ "MONTHLY": MONTHLY,
+ "WEEKLY": WEEKLY,
+ "DAILY": DAILY,
+ "HOURLY": HOURLY,
+ "MINUTELY": MINUTELY,
+ "SECONDLY": SECONDLY}
+
+ _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
+ "FR": 4, "SA": 5, "SU": 6}
+
+ def _handle_int(self, rrkwargs, name, value, **kwargs):
+ rrkwargs[name.lower()] = int(value)
+
+ def _handle_int_list(self, rrkwargs, name, value, **kwargs):
+ rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
+
+ _handle_INTERVAL = _handle_int
+ _handle_COUNT = _handle_int
+ _handle_BYSETPOS = _handle_int_list
+ _handle_BYMONTH = _handle_int_list
+ _handle_BYMONTHDAY = _handle_int_list
+ _handle_BYYEARDAY = _handle_int_list
+ _handle_BYEASTER = _handle_int_list
+ _handle_BYWEEKNO = _handle_int_list
+ _handle_BYHOUR = _handle_int_list
+ _handle_BYMINUTE = _handle_int_list
+ _handle_BYSECOND = _handle_int_list
+
+ def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
+ rrkwargs["freq"] = self._freq_map[value]
+
+ def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
+ global parser
+ if not parser:
+ from dateutil import parser
+ try:
+ rrkwargs["until"] = parser.parse(value,
+ ignoretz=kwargs.get("ignoretz"),
+ tzinfos=kwargs.get("tzinfos"))
+ except ValueError:
+ raise ValueError("invalid until date")
+
+ def _handle_WKST(self, rrkwargs, name, value, **kwargs):
+ rrkwargs["wkst"] = self._weekday_map[value]
+
+ def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
+ """
+ Two ways to specify this: +1MO or MO(+1)
+ """
+ l = []
+ for wday in value.split(','):
+ if '(' in wday:
+ # If it's of the form TH(+1), etc.
+ splt = wday.split('(')
+ w = splt[0]
+ n = int(splt[1][:-1])
+ elif len(wday):
+ # If it's of the form +1MO
+ for i in range(len(wday)):
+ if wday[i] not in '+-0123456789':
+ break
+ n = wday[:i] or None
+ w = wday[i:]
+ if n:
+ n = int(n)
+ else:
+ raise ValueError("Invalid (empty) BYDAY specification.")
+
+ l.append(weekdays[self._weekday_map[w]](n))
+ rrkwargs["byweekday"] = l
+
+ _handle_BYDAY = _handle_BYWEEKDAY
+
+ def _parse_rfc_rrule(self, line,
+ dtstart=None,
+ cache=False,
+ ignoretz=False,
+ tzinfos=None):
+ if line.find(':') != -1:
+ name, value = line.split(':')
+ if name != "RRULE":
+ raise ValueError("unknown parameter name")
+ else:
+ value = line
+ rrkwargs = {}
+ for pair in value.split(';'):
+ name, value = pair.split('=')
+ name = name.upper()
+ value = value.upper()
+ try:
+ getattr(self, "_handle_"+name)(rrkwargs, name, value,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ except AttributeError:
+ raise ValueError("unknown parameter '%s'" % name)
+ except (KeyError, ValueError):
+ raise ValueError("invalid '%s': %s" % (name, value))
+ return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
+
+ def _parse_rfc(self, s,
+ dtstart=None,
+ cache=False,
+ unfold=False,
+ forceset=False,
+ compatible=False,
+ ignoretz=False,
+ tzinfos=None):
+ global parser
+ if compatible:
+ forceset = True
+ unfold = True
+ s = s.upper()
+ if not s.strip():
+ raise ValueError("empty string")
+ if unfold:
+ lines = s.splitlines()
+ i = 0
+ while i < len(lines):
+ line = lines[i].rstrip()
+ if not line:
+ del lines[i]
+ elif i > 0 and line[0] == " ":
+ lines[i-1] += line[1:]
+ del lines[i]
+ else:
+ i += 1
+ else:
+ lines = s.split()
+ if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
+ s.startswith('RRULE:'))):
+ return self._parse_rfc_rrule(lines[0], cache=cache,
+ dtstart=dtstart, ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ else:
+ rrulevals = []
+ rdatevals = []
+ exrulevals = []
+ exdatevals = []
+ for line in lines:
+ if not line:
+ continue
+ if line.find(':') == -1:
+ name = "RRULE"
+ value = line
+ else:
+ name, value = line.split(':', 1)
+ parms = name.split(';')
+ if not parms:
+ raise ValueError("empty property name")
+ name = parms[0]
+ parms = parms[1:]
+ if name == "RRULE":
+ for parm in parms:
+ raise ValueError("unsupported RRULE parm: "+parm)
+ rrulevals.append(value)
+ elif name == "RDATE":
+ for parm in parms:
+ if parm != "VALUE=DATE-TIME":
+ raise ValueError("unsupported RDATE parm: "+parm)
+ rdatevals.append(value)
+ elif name == "EXRULE":
+ for parm in parms:
+ raise ValueError("unsupported EXRULE parm: "+parm)
+ exrulevals.append(value)
+ elif name == "EXDATE":
+ for parm in parms:
+ if parm != "VALUE=DATE-TIME":
+                            raise ValueError("unsupported EXDATE parm: "+parm)
+ exdatevals.append(value)
+ elif name == "DTSTART":
+ for parm in parms:
+ raise ValueError("unsupported DTSTART parm: "+parm)
+ if not parser:
+ from dateutil import parser
+ dtstart = parser.parse(value, ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ else:
+ raise ValueError("unsupported property: "+name)
+ if (forceset or len(rrulevals) > 1 or rdatevals
+ or exrulevals or exdatevals):
+ if not parser and (rdatevals or exdatevals):
+ from dateutil import parser
+ rset = rruleset(cache=cache)
+ for value in rrulevals:
+ rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in rdatevals:
+ for datestr in value.split(','):
+ rset.rdate(parser.parse(datestr,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in exrulevals:
+ rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in exdatevals:
+ for datestr in value.split(','):
+ rset.exdate(parser.parse(datestr,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ if compatible and dtstart:
+ rset.rdate(dtstart)
+ return rset
+ else:
+ return self._parse_rfc_rrule(rrulevals[0],
+ dtstart=dtstart,
+ cache=cache,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos)
+
+ def __call__(self, s, **kwargs):
+ return self._parse_rfc(s, **kwargs)
+
+rrulestr = _rrulestr()
+
+# vim:ts=4:sw=4:et
diff --git a/app/lib/dateutil/tz/__init__.py b/app/lib/dateutil/tz/__init__.py
new file mode 100644
index 0000000..1cba7b9
--- /dev/null
+++ b/app/lib/dateutil/tz/__init__.py
@@ -0,0 +1,4 @@
+from .tz import *
+
+__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
+ "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
diff --git a/app/lib/dateutil/tz/_common.py b/app/lib/dateutil/tz/_common.py
new file mode 100644
index 0000000..212e8ce
--- /dev/null
+++ b/app/lib/dateutil/tz/_common.py
@@ -0,0 +1,380 @@
+from six import PY3
+from six.moves import _thread
+
+from datetime import datetime, timedelta, tzinfo
+import copy
+
+ZERO = timedelta(0)
+
+__all__ = ['tzname_in_python2', 'enfold']
+
+
+def tzname_in_python2(namefunc):
+ """Change unicode output into bytestrings in Python 2
+
+ tzname() API changed in Python 3. It used to return bytes, but was changed
+ to unicode strings
+ """
+ def adjust_encoding(*args, **kwargs):
+ name = namefunc(*args, **kwargs)
+ if name is not None and not PY3:
+ name = name.encode()
+
+ return name
+
+ return adjust_encoding
+
+
+# The following is adapted from Alexander Belopolsky's tz library
+# https://github.com/abalkin/tz
+if hasattr(datetime, 'fold'):
+    # This is the Python 3.6+ situation, where datetime natively supports fold
+ def enfold(dt, fold=1):
+ """
+ Provides a unified interface for assigning the ``fold`` attribute to
+ datetimes both before and after the implementation of PEP-495.
+
+ :param fold:
+ The value for the ``fold`` attribute in the returned datetime. This
+ should be either 0 or 1.
+
+ :return:
+ Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+ ``fold`` for all versions of Python. In versions prior to
+ Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+ subclass of :py:class:`datetime.datetime` with the ``fold``
+ attribute added, if ``fold`` is 1.
+
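+        A quick sketch (on Python 3.6+, the branch this definition
+        covers, ``fold`` is native):
+
+        >>> from datetime import datetime
+        >>> enfold(datetime(2017, 11, 5, 1, 30)).fold
+        1
+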
+        .. versionadded:: 2.6.0
+ """
+ return dt.replace(fold=fold)
+
+else:
+ class _DatetimeWithFold(datetime):
+ """
+ This is a class designed to provide a PEP 495-compliant interface for
+ Python versions before 3.6. It is used only for dates in a fold, so
+ the ``fold`` attribute is fixed at ``1``.
+
+        .. versionadded:: 2.6.0
+ """
+ __slots__ = ()
+
+ @property
+ def fold(self):
+ return 1
+
+ def enfold(dt, fold=1):
+ """
+ Provides a unified interface for assigning the ``fold`` attribute to
+ datetimes both before and after the implementation of PEP-495.
+
+ :param fold:
+ The value for the ``fold`` attribute in the returned datetime. This
+ should be either 0 or 1.
+
+ :return:
+ Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+ ``fold`` for all versions of Python. In versions prior to
+ Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+ subclass of :py:class:`datetime.datetime` with the ``fold``
+ attribute added, if ``fold`` is 1.
+
+        .. versionadded:: 2.6.0
+ """
+ if getattr(dt, 'fold', 0) == fold:
+ return dt
+
+ args = dt.timetuple()[:6]
+ args += (dt.microsecond, dt.tzinfo)
+
+ if fold:
+ return _DatetimeWithFold(*args)
+ else:
+ return datetime(*args)
+
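+# A minimal usage sketch (illustrative, not part of the library): ``enfold``
+# tags a wall time as the second occurrence of an ambiguous hour, and the
+# tag can be read back uniformly on every supported Python version:
+#
+#     >>> dt = datetime(2017, 11, 5, 1, 30)  # repeated hour in US/Eastern
+#     >>> getattr(enfold(dt, fold=1), 'fold', 0)
+#     1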
+
+class _tzinfo(tzinfo):
+ """
+ Base class for all ``dateutil`` ``tzinfo`` objects.
+ """
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+ """
+
+ dt = dt.replace(tzinfo=self)
+
+ wall_0 = enfold(dt, fold=0)
+ wall_1 = enfold(dt, fold=1)
+
+ same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+ same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
+
+ return same_dt and not same_offset
+
+ def _fold_status(self, dt_utc, dt_wall):
+ """
+ Determine the fold status of a "wall" datetime, given a representation
+ of the same datetime as a (naive) UTC datetime. This is calculated based
+ on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
+ datetimes, and that this offset is the actual number of hours separating
+ ``dt_utc`` and ``dt_wall``.
+
+ :param dt_utc:
+ Representation of the datetime as UTC
+
+ :param dt_wall:
+ Representation of the datetime as "wall time". This parameter must
+ either have a `fold` attribute or have a fold-naive
+ :class:`datetime.tzinfo` attached, otherwise the calculation may
+ fail.
+ """
+ if self.is_ambiguous(dt_wall):
+ delta_wall = dt_wall - dt_utc
+ _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
+ else:
+ _fold = 0
+
+ return _fold
+
+ def _fold(self, dt):
+ return getattr(dt, 'fold', 0)
+
+ def _fromutc(self, dt):
+ """
+ Given a timezone-aware datetime in a given timezone, calculates a
+ timezone-aware datetime in a new timezone.
+
+ Since this is the one time that we *know* we have an unambiguous
+ datetime object, we take this opportunity to determine whether the
+ datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+ :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+ """
+
+ # Re-implement the algorithm from Python's datetime.py
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ dtoff = dt.utcoffset()
+ if dtoff is None:
+ raise ValueError("fromutc() requires a non-None utcoffset() "
+ "result")
+
+ # The original datetime.py code assumes that `dst()` defaults to
+ # zero during ambiguous times. PEP 495 inverts this presumption, so
+ # for pre-PEP 495 versions of python, we need to tweak the algorithm.
+ dtdst = dt.dst()
+ if dtdst is None:
+ raise ValueError("fromutc() requires a non-None dst() result")
+ delta = dtoff - dtdst
+ if delta:
+ dt += delta
+ # Set fold=1 so we can default to being in the fold for
+ # ambiguous dates.
+ dtdst = enfold(dt, fold=1).dst()
+ if dtdst is None:
+ raise ValueError("fromutc(): dt.dst gave inconsistent "
+ "results; cannot convert")
+ return dt + dtdst
+
+ def fromutc(self, dt):
+ """
+ Given a timezone-aware datetime in a given timezone, calculates a
+ timezone-aware datetime in a new timezone.
+
+ Since this is the one time that we *know* we have an unambiguous
+ datetime object, we take this opportunity to determine whether the
+ datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+ :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+ """
+ dt_wall = self._fromutc(dt)
+
+ # Calculate the fold status given the two datetimes.
+ _fold = self._fold_status(dt, dt_wall)
+
+ # Set the default fold value for ambiguous dates
+ return enfold(dt_wall, fold=_fold)
+
+
+class tzrangebase(_tzinfo):
+ """
+ This is an abstract base class for time zones represented by an annual
+ transition into and out of DST. Child classes should implement the following
+ methods:
+
+ * ``__init__(self, *args, **kwargs)``
+ * ``transitions(self, year)`` - this is expected to return a tuple of
+ datetimes representing the DST on and off transitions in standard
+ time.
+
+ A fully initialized ``tzrangebase`` subclass should also provide the
+ following attributes:
+ * ``hasdst``: Boolean whether or not the zone uses DST.
+ * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
+ representing the respective UTC offsets.
+ * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
+ abbreviations in DST and STD, respectively.
+
+    .. versionadded:: 2.6.0
+ """
+ def __init__(self):
+ raise NotImplementedError('tzrangebase is an abstract base class')
+
+ def utcoffset(self, dt):
+ isdst = self._isdst(dt)
+
+ if isdst is None:
+ return None
+ elif isdst:
+ return self._dst_offset
+ else:
+ return self._std_offset
+
+ def dst(self, dt):
+ isdst = self._isdst(dt)
+
+ if isdst is None:
+ return None
+ elif isdst:
+ return self._dst_base_offset
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ if self._isdst(dt):
+ return self._dst_abbr
+ else:
+ return self._std_abbr
+
+ def fromutc(self, dt):
+ """ Given a datetime in UTC, return local time """
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ # Get transitions - if there are none, fixed offset
+ transitions = self.transitions(dt.year)
+ if transitions is None:
+ return dt + self.utcoffset(dt)
+
+ # Get the transition times in UTC
+ dston, dstoff = transitions
+
+ dston -= self._std_offset
+ dstoff -= self._std_offset
+
+ utc_transitions = (dston, dstoff)
+ dt_utc = dt.replace(tzinfo=None)
+
+ isdst = self._naive_isdst(dt_utc, utc_transitions)
+
+ if isdst:
+ dt_wall = dt + self._dst_offset
+ else:
+ dt_wall = dt + self._std_offset
+
+ _fold = int(not isdst and self.is_ambiguous(dt_wall))
+
+ return enfold(dt_wall, fold=_fold)
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ if not self.hasdst:
+ return False
+
+ start, end = self.transitions(dt.year)
+
+ dt = dt.replace(tzinfo=None)
+ return (end <= dt < end + self._dst_base_offset)
+
+ def _isdst(self, dt):
+ if not self.hasdst:
+ return False
+ elif dt is None:
+ return None
+
+ transitions = self.transitions(dt.year)
+
+ if transitions is None:
+ return False
+
+ dt = dt.replace(tzinfo=None)
+
+ isdst = self._naive_isdst(dt, transitions)
+
+ # Handle ambiguous dates
+ if not isdst and self.is_ambiguous(dt):
+ return not self._fold(dt)
+ else:
+ return isdst
+
+ def _naive_isdst(self, dt, transitions):
+ dston, dstoff = transitions
+
+ dt = dt.replace(tzinfo=None)
+
+ if dston < dstoff:
+ isdst = dston <= dt < dstoff
+ else:
+ isdst = not dstoff <= dt < dston
+
+ return isdst
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_offset - self._std_offset
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(...)" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
+
+
+def _total_seconds(td):
+ # Python 2.6 doesn't have a total_seconds() method on timedelta objects
+ return ((td.seconds + td.days * 86400) * 1000000 +
+ td.microseconds) // 1000000
+
+_total_seconds = getattr(timedelta, 'total_seconds', _total_seconds)
diff --git a/app/lib/dateutil/tz/tz.py b/app/lib/dateutil/tz/tz.py
new file mode 100644
index 0000000..6bee291
--- /dev/null
+++ b/app/lib/dateutil/tz/tz.py
@@ -0,0 +1,1464 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers timezone implementations subclassing the abstract
+:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
+files (usually found in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
+etc), TZ
+environment string (in all known formats), given ranges (with help from
+relative deltas), local machine timezone, fixed offset timezone, and UTC
+timezone.
+"""
+import datetime
+import struct
+import time
+import sys
+import os
+import bisect
+import copy
+
+from operator import itemgetter
+
+from contextlib import contextmanager
+
+from six import string_types, PY3
+from ._common import tzname_in_python2, _tzinfo, _total_seconds
+from ._common import tzrangebase, enfold
+
+try:
+ from .win import tzwin, tzwinlocal
+except ImportError:
+ tzwin = tzwinlocal = None
+
+ZERO = datetime.timedelta(0)
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+EPOCHORDINAL = EPOCH.toordinal()
+
+
+class tzutc(datetime.tzinfo):
+ """
+ This is a tzinfo object that represents the UTC time zone.
+ """
+ def utcoffset(self, dt):
+ return ZERO
+
+ def dst(self, dt):
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return "UTC"
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ return False
+
+ def __eq__(self, other):
+ if not isinstance(other, (tzutc, tzoffset)):
+ return NotImplemented
+
+ return (isinstance(other, tzutc) or
+ (isinstance(other, tzoffset) and other._offset == ZERO))
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
+
+
+class tzoffset(datetime.tzinfo):
+ """
+ A simple class for representing a fixed offset from UTC.
+
+ :param name:
+ The timezone name, to be returned when ``tzname()`` is called.
+
+ :param offset:
+        The time zone offset in seconds, or (since version 2.6.0) a
+        :py:class:`datetime.timedelta` object or equivalent.
+ """
+ def __init__(self, name, offset):
+ self._name = name
+
+ try:
+ # Allow a timedelta
+ offset = _total_seconds(offset)
+ except (TypeError, AttributeError):
+ pass
+ self._offset = datetime.timedelta(seconds=offset)
+
+ def utcoffset(self, dt):
+ return self._offset
+
+ def dst(self, dt):
+ return ZERO
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ return False
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return self._name
+
+ def __eq__(self, other):
+ if not isinstance(other, tzoffset):
+ return NotImplemented
+
+ return self._offset == other._offset
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(%s, %s)" % (self.__class__.__name__,
+ repr(self._name),
+ int(_total_seconds(self._offset)))
+
+ __reduce__ = object.__reduce__
+
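+# Illustrative sketch (not part of the library): a fixed -03:00 zone built
+# from an integer offset in seconds; a datetime.timedelta works as well.
+#
+#     >>> brst = tzoffset("BRST", -10800)
+#     >>> brst.utcoffset(None) == datetime.timedelta(hours=-3)
+#     True
+#     >>> brst.dst(None) == ZERO
+#     True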
+
+class tzlocal(_tzinfo):
+ """
+ A :class:`tzinfo` subclass built around the ``time`` timezone functions.
+ """
+ def __init__(self):
+ super(tzlocal, self).__init__()
+
+ self._std_offset = datetime.timedelta(seconds=-time.timezone)
+ if time.daylight:
+ self._dst_offset = datetime.timedelta(seconds=-time.altzone)
+ else:
+ self._dst_offset = self._std_offset
+
+ self._dst_saved = self._dst_offset - self._std_offset
+ self._hasdst = bool(self._dst_saved)
+
+ def utcoffset(self, dt):
+ if dt is None and self._hasdst:
+ return None
+
+ if self._isdst(dt):
+ return self._dst_offset
+ else:
+ return self._std_offset
+
+ def dst(self, dt):
+ if dt is None and self._hasdst:
+ return None
+
+ if self._isdst(dt):
+ return self._dst_offset - self._std_offset
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return time.tzname[self._isdst(dt)]
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ naive_dst = self._naive_is_dst(dt)
+ return (not naive_dst and
+ (naive_dst != self._naive_is_dst(dt - self._dst_saved)))
+
+ def _naive_is_dst(self, dt):
+ timestamp = _datetime_to_timestamp(dt)
+ return time.localtime(timestamp + time.timezone).tm_isdst
+
+ def _isdst(self, dt, fold_naive=True):
+ # We can't use mktime here. It is unstable when deciding if
+ # the hour near to a change is DST or not.
+ #
+ # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
+ # dt.minute, dt.second, dt.weekday(), 0, -1))
+ # return time.localtime(timestamp).tm_isdst
+ #
+ # The code above yields the following result:
+ #
+ # >>> import tz, datetime
+ # >>> t = tz.tzlocal()
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRDT'
+ # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
+ # 'BRST'
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRST'
+ # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
+ # 'BRDT'
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRDT'
+ #
+ # Here is a more stable implementation:
+ #
+ if not self._hasdst:
+ return False
+
+ # Check for ambiguous times:
+ dstval = self._naive_is_dst(dt)
+ fold = getattr(dt, 'fold', None)
+
+ if self.is_ambiguous(dt):
+ if fold is not None:
+ return not self._fold(dt)
+ else:
+ return True
+
+ return dstval
+
+ def __eq__(self, other):
+ if not isinstance(other, tzlocal):
+ return NotImplemented
+
+ return (self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
+
+
+class _ttinfo(object):
+ __slots__ = ["offset", "delta", "isdst", "abbr",
+ "isstd", "isgmt", "dstoffset"]
+
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+
+ def __repr__(self):
+ l = []
+ for attr in self.__slots__:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("%s=%s" % (attr, repr(value)))
+ return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
+
+ def __eq__(self, other):
+ if not isinstance(other, _ttinfo):
+ return NotImplemented
+
+ return (self.offset == other.offset and
+ self.delta == other.delta and
+ self.isdst == other.isdst and
+ self.abbr == other.abbr and
+ self.isstd == other.isstd and
+ self.isgmt == other.isgmt and
+ self.dstoffset == other.dstoffset)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __getstate__(self):
+ state = {}
+ for name in self.__slots__:
+ state[name] = getattr(self, name, None)
+ return state
+
+ def __setstate__(self, state):
+ for name in self.__slots__:
+ if name in state:
+ setattr(self, name, state[name])
+
+
+class _tzfile(object):
+ """
+ Lightweight class for holding the relevant transition and time zone
+ information read from binary tzfiles.
+ """
+ attrs = ['trans_list', 'trans_idx', 'ttinfo_list',
+ 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
+
+ def __init__(self, **kwargs):
+ for attr in self.attrs:
+ setattr(self, attr, kwargs.get(attr, None))
+
+
+class tzfile(_tzinfo):
+ """
+ This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
+ format timezone files to extract current and historical zone information.
+
+ :param fileobj:
+ This can be an opened file stream or a file name that the time zone
+ information can be read from.
+
+ :param filename:
+ This is an optional parameter specifying the source of the time zone
+ information in the event that ``fileobj`` is a file object. If omitted
+ and ``fileobj`` is a file stream, this parameter will be set either to
+ ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
+
+    See `Sources for Time Zone and Daylight Saving Time Data
+    <http://www.twinsun.com/tz/tz-link.htm>`_ for more information. Time zone
+    files can be compiled from the `IANA Time Zone database files
+    <https://www.iana.org/time-zones>`_ with the `zic time zone compiler
+    <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_.
+ """
+
+ def __init__(self, fileobj, filename=None):
+ super(tzfile, self).__init__()
+
+ file_opened_here = False
+ if isinstance(fileobj, string_types):
+ self._filename = fileobj
+ fileobj = open(fileobj, 'rb')
+ file_opened_here = True
+ elif filename is not None:
+ self._filename = filename
+ elif hasattr(fileobj, "name"):
+ self._filename = fileobj.name
+ else:
+ self._filename = repr(fileobj)
+
+ if fileobj is not None:
+ if not file_opened_here:
+ fileobj = _ContextWrapper(fileobj)
+
+ with fileobj as file_stream:
+ tzobj = self._read_tzfile(file_stream)
+
+ self._set_tzdata(tzobj)
+
+ def _set_tzdata(self, tzobj):
+ """ Set the time zone data of this object from a _tzfile object """
+ # Copy the relevant attributes over as private attributes
+ for attr in _tzfile.attrs:
+ setattr(self, '_' + attr, getattr(tzobj, attr))
+
+ def _read_tzfile(self, fileobj):
+ out = _tzfile()
+
+ # From tzfile(5):
+ #
+ # The time zone information files used by tzset(3)
+ # begin with the magic characters "TZif" to identify
+ # them as time zone information files, followed by
+ # sixteen bytes reserved for future use, followed by
+ # six four-byte values of type long, written in a
+ # ``standard'' byte order (the high-order byte
+ # of the value is written first).
+ if fileobj.read(4).decode() != "TZif":
+ raise ValueError("magic not found")
+
+ fileobj.read(16)
+
+ (
+ # The number of UTC/local indicators stored in the file.
+ ttisgmtcnt,
+
+ # The number of standard/wall indicators stored in the file.
+ ttisstdcnt,
+
+ # The number of leap seconds for which data is
+ # stored in the file.
+ leapcnt,
+
+ # The number of "transition times" for which data
+ # is stored in the file.
+ timecnt,
+
+ # The number of "local time types" for which data
+ # is stored in the file (must not be zero).
+ typecnt,
+
+ # The number of characters of "time zone
+ # abbreviation strings" stored in the file.
+ charcnt,
+
+ ) = struct.unpack(">6l", fileobj.read(24))
+
+ # The above header is followed by tzh_timecnt four-byte
+ # values of type long, sorted in ascending order.
+ # These values are written in ``standard'' byte order.
+ # Each is used as a transition time (as returned by
+ # time(2)) at which the rules for computing local time
+ # change.
+
+ if timecnt:
+ out.trans_list = list(struct.unpack(">%dl" % timecnt,
+ fileobj.read(timecnt*4)))
+ else:
+ out.trans_list = []
+
+ # Next come tzh_timecnt one-byte values of type unsigned
+ # char; each one tells which of the different types of
+ # ``local time'' types described in the file is associated
+ # with the same-indexed transition time. These values
+ # serve as indices into an array of ttinfo structures that
+ # appears next in the file.
+
+ if timecnt:
+ out.trans_idx = struct.unpack(">%dB" % timecnt,
+ fileobj.read(timecnt))
+ else:
+ out.trans_idx = []
+
+ # Each ttinfo structure is written as a four-byte value
+ # for tt_gmtoff of type long, in a standard byte
+ # order, followed by a one-byte value for tt_isdst
+ # and a one-byte value for tt_abbrind. In each
+ # structure, tt_gmtoff gives the number of
+ # seconds to be added to UTC, tt_isdst tells whether
+ # tm_isdst should be set by localtime(3), and
+ # tt_abbrind serves as an index into the array of
+ # time zone abbreviation characters that follow the
+ # ttinfo structure(s) in the file.
+
+ ttinfo = []
+
+ for i in range(typecnt):
+ ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
+
+ abbr = fileobj.read(charcnt).decode()
+
+ # Then there are tzh_leapcnt pairs of four-byte
+ # values, written in standard byte order; the
+ # first value of each pair gives the time (as
+ # returned by time(2)) at which a leap second
+ # occurs; the second gives the total number of
+ # leap seconds to be applied after the given time.
+ # The pairs of values are sorted in ascending order
+ # by time.
+
+ # Not used, for now (but read anyway for correct file position)
+ if leapcnt:
+ leap = struct.unpack(">%dl" % (leapcnt*2),
+ fileobj.read(leapcnt*8))
+
+ # Then there are tzh_ttisstdcnt standard/wall
+ # indicators, each stored as a one-byte value;
+ # they tell whether the transition times associated
+ # with local time types were specified as standard
+ # time or wall clock time, and are used when
+ # a time zone file is used in handling POSIX-style
+ # time zone environment variables.
+
+ if ttisstdcnt:
+ isstd = struct.unpack(">%db" % ttisstdcnt,
+ fileobj.read(ttisstdcnt))
+
+ # Finally, there are tzh_ttisgmtcnt UTC/local
+ # indicators, each stored as a one-byte value;
+ # they tell whether the transition times associated
+ # with local time types were specified as UTC or
+ # local time, and are used when a time zone file
+ # is used in handling POSIX-style time zone envi-
+ # ronment variables.
+
+ if ttisgmtcnt:
+ isgmt = struct.unpack(">%db" % ttisgmtcnt,
+ fileobj.read(ttisgmtcnt))
+
+ # Build ttinfo list
+ out.ttinfo_list = []
+ for i in range(typecnt):
+ gmtoff, isdst, abbrind = ttinfo[i]
+            # Round to the nearest full minute, since Python's datetime
+            # doesn't accept sub-minute timezones. See
+            # http://python.org/sf/1447945 for more information.
+ gmtoff = 60 * ((gmtoff + 30) // 60)
+ tti = _ttinfo()
+ tti.offset = gmtoff
+ tti.dstoffset = datetime.timedelta(0)
+ tti.delta = datetime.timedelta(seconds=gmtoff)
+ tti.isdst = isdst
+ tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
+ tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
+ tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
+ out.ttinfo_list.append(tti)
+
+ # Replace ttinfo indexes for ttinfo objects.
+ out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
+
+ # Set standard, dst, and before ttinfos. before will be
+ # used when a given time is before any transitions,
+ # and will be set to the first non-dst ttinfo, or to
+ # the first dst, if all of them are dst.
+ out.ttinfo_std = None
+ out.ttinfo_dst = None
+ out.ttinfo_before = None
+ if out.ttinfo_list:
+ if not out.trans_list:
+ out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
+ else:
+ for i in range(timecnt-1, -1, -1):
+ tti = out.trans_idx[i]
+ if not out.ttinfo_std and not tti.isdst:
+ out.ttinfo_std = tti
+ elif not out.ttinfo_dst and tti.isdst:
+ out.ttinfo_dst = tti
+
+ if out.ttinfo_std and out.ttinfo_dst:
+ break
+ else:
+ if out.ttinfo_dst and not out.ttinfo_std:
+ out.ttinfo_std = out.ttinfo_dst
+
+ for tti in out.ttinfo_list:
+ if not tti.isdst:
+ out.ttinfo_before = tti
+ break
+ else:
+ out.ttinfo_before = out.ttinfo_list[0]
+
+ # Now fix transition times to become relative to wall time.
+ #
+ # I'm not sure about this. In my tests, the tz source file
+ # is setup to wall time, and in the binary file isstd and
+ # isgmt are off, so it should be in wall time. OTOH, it's
+ # always in gmt time. Let me know if you have comments
+ # about this.
+ laststdoffset = None
+ for i, tti in enumerate(out.trans_idx):
+ if not tti.isdst:
+ offset = tti.offset
+ laststdoffset = offset
+ else:
+ if laststdoffset is not None:
+ # Store the DST offset as well and update it in the list
+ tti.dstoffset = tti.offset - laststdoffset
+ out.trans_idx[i] = tti
+
+ offset = laststdoffset or 0
+
+ out.trans_list[i] += offset
+
+ # In case we missed any DST offsets on the way in for some reason, make
+ # a second pass over the list, looking for the /next/ DST offset.
+ laststdoffset = None
+ for i in reversed(range(len(out.trans_idx))):
+ tti = out.trans_idx[i]
+ if tti.isdst:
+ if not (tti.dstoffset or laststdoffset is None):
+ tti.dstoffset = tti.offset - laststdoffset
+ else:
+ laststdoffset = tti.offset
+
+ if not isinstance(tti.dstoffset, datetime.timedelta):
+ tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
+
+ out.trans_idx[i] = tti
+
+ out.trans_idx = tuple(out.trans_idx)
+ out.trans_list = tuple(out.trans_list)
+
+ return out
+
+ def _find_last_transition(self, dt):
+ # If there's no list, there are no transitions to find
+ if not self._trans_list:
+ return None
+
+ timestamp = _datetime_to_timestamp(dt)
+
+ # Find where the timestamp fits in the transition list - if the
+ # timestamp is a transition time, it's part of the "after" period.
+ idx = bisect.bisect_right(self._trans_list, timestamp)
+
+ # We want to know when the previous transition was, so subtract off 1
+ return idx - 1
+
+ def _get_ttinfo(self, idx):
+ # For no list or after the last transition, default to _ttinfo_std
+ if idx is None or (idx + 1) == len(self._trans_list):
+ return self._ttinfo_std
+
+ # If there is a list and the time is before it, return _ttinfo_before
+ if idx < 0:
+ return self._ttinfo_before
+
+ return self._trans_idx[idx]
+
+ def _find_ttinfo(self, dt):
+ idx = self._resolve_ambiguous_time(dt)
+
+ return self._get_ttinfo(idx)
+
+ def is_ambiguous(self, dt, idx=None):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ if idx is None:
+ idx = self._find_last_transition(dt)
+
+ # Calculate the difference in offsets from current to previous
+ timestamp = _datetime_to_timestamp(dt)
+ tti = self._get_ttinfo(idx)
+
+ if idx is None or idx <= 0:
+ return False
+
+ od = self._get_ttinfo(idx - 1).offset - tti.offset
+ tt = self._trans_list[idx] # Transition time
+
+ return timestamp < tt + od
+
+ def _resolve_ambiguous_time(self, dt):
+ idx = self._find_last_transition(dt)
+
+ # If we have no transitions, return the index
+ _fold = self._fold(dt)
+ if idx is None or idx == 0:
+ return idx
+
+        # If the wall time is ambiguous and fold=0 (the first occurrence),
+        # resolve to the earlier, pre-transition index.
+ idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
+
+ return idx - idx_offset
+
+ def utcoffset(self, dt):
+ if dt is None:
+ return None
+
+ if not self._ttinfo_std:
+ return ZERO
+
+ return self._find_ttinfo(dt).delta
+
+ def dst(self, dt):
+ if dt is None:
+ return None
+
+ if not self._ttinfo_dst:
+ return ZERO
+
+ tti = self._find_ttinfo(dt)
+
+ if not tti.isdst:
+ return ZERO
+
+ # The documentation says that utcoffset()-dst() must
+ # be constant for every dt.
+ return tti.dstoffset
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ if not self._ttinfo_std or dt is None:
+ return None
+ return self._find_ttinfo(dt).abbr
+
+ def __eq__(self, other):
+ if not isinstance(other, tzfile):
+ return NotImplemented
+ return (self._trans_list == other._trans_list and
+ self._trans_idx == other._trans_idx and
+ self._ttinfo_list == other._ttinfo_list)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
+
+ def __reduce__(self):
+ return self.__reduce_ex__(None)
+
+ def __reduce_ex__(self, protocol):
+ return (self.__class__, (None, self._filename), self.__dict__)
+
+
+class tzrange(tzrangebase):
+ """
+ The ``tzrange`` object is a time zone specified by a set of offsets and
+ abbreviations, equivalent to the way the ``TZ`` variable can be specified
+ in POSIX-like systems, but using Python delta objects to specify DST
+ start, end and offsets.
+
+ :param stdabbr:
+ The abbreviation for standard time (e.g. ``'EST'``).
+
+ :param stdoffset:
+ An integer or :class:`datetime.timedelta` object or equivalent
+ specifying the base offset from UTC.
+
+ If unspecified, +00:00 is used.
+
+ :param dstabbr:
+ The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
+
+ If specified, with no other DST information, DST is assumed to occur
+ and the default behavior or ``dstoffset``, ``start`` and ``end`` is
+ used. If unspecified and no other DST information is specified, it
+ is assumed that this zone has no DST.
+
+    If this is unspecified and other DST information *is* specified,
+ DST occurs in the zone but the time zone abbreviation is left
+ unchanged.
+
+ :param dstoffset:
+        An integer or :class:`datetime.timedelta` object or equivalent
+ specifying the UTC offset during DST. If unspecified and any other DST
+ information is specified, it is assumed to be the STD offset +1 hour.
+
+ :param start:
+ A :class:`relativedelta.relativedelta` object or equivalent specifying
+ the time and time of year that daylight savings time starts. To specify,
+ for example, that DST starts at 2AM on the 2nd Sunday in March, pass:
+
+ ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
+
+ If unspecified and any other DST information is specified, the default
+ value is 2 AM on the first Sunday in April.
+
+ :param end:
+ A :class:`relativedelta.relativedelta` object or equivalent representing
+ the time and time of year that daylight savings time ends, with the
+ same specification method as in ``start``. One note is that this should
+ point to the first time in the *standard* zone, so if a transition
+ occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM,
+ set the `hours` parameter to +1.
+
+
+ **Examples:**
+
+ .. testsetup:: tzrange
+
+ from dateutil.tz import tzrange, tzstr
+
+ .. doctest:: tzrange
+
+ >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
+ True
+
+ >>> from dateutil.relativedelta import *
+ >>> range1 = tzrange("EST", -18000, "EDT")
+ >>> range2 = tzrange("EST", -18000, "EDT", -14400,
+ ... relativedelta(hours=+2, month=4, day=1,
+ ... weekday=SU(+1)),
+ ... relativedelta(hours=+1, month=10, day=31,
+ ... weekday=SU(-1)))
+ >>> tzstr('EST5EDT') == range1 == range2
+ True
+
+ """
+ def __init__(self, stdabbr, stdoffset=None,
+ dstabbr=None, dstoffset=None,
+ start=None, end=None):
+
+ global relativedelta
+ from dateutil import relativedelta
+
+ self._std_abbr = stdabbr
+ self._dst_abbr = dstabbr
+
+ try:
+ stdoffset = _total_seconds(stdoffset)
+ except (TypeError, AttributeError):
+ pass
+
+ try:
+ dstoffset = _total_seconds(dstoffset)
+ except (TypeError, AttributeError):
+ pass
+
+ if stdoffset is not None:
+ self._std_offset = datetime.timedelta(seconds=stdoffset)
+ else:
+ self._std_offset = ZERO
+
+ if dstoffset is not None:
+ self._dst_offset = datetime.timedelta(seconds=dstoffset)
+ elif dstabbr and stdoffset is not None:
+ self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
+ else:
+ self._dst_offset = ZERO
+
+ if dstabbr and start is None:
+ self._start_delta = relativedelta.relativedelta(
+ hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
+ else:
+ self._start_delta = start
+
+ if dstabbr and end is None:
+ self._end_delta = relativedelta.relativedelta(
+ hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
+ else:
+ self._end_delta = end
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = bool(self._start_delta)
+
+ def transitions(self, year):
+ """
+ For a given year, get the DST on and off transition times, expressed
+ always on the standard time side. For zones with no transitions, this
+ function returns ``None``.
+
+ :param year:
+ The year whose transitions you would like to query.
+
+ :return:
+ Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+ ``(dston, dstoff)`` for zones with an annual DST transition, or
+ ``None`` for fixed offset zones.
+ """
+ if not self.hasdst:
+ return None
+
+ base_year = datetime.datetime(year, 1, 1)
+
+ start = base_year + self._start_delta
+ end = base_year + self._end_delta
+
+ return (start, end)
+
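+    # Illustrative check (default US 1987-2006 rules, as encoded above):
+    # tzrange("EST", -18000, "EDT").transitions(2003) returns
+    # (datetime(2003, 4, 6, 2, 0), datetime(2003, 10, 26, 1, 0)), with both
+    # values expressed on the standard-time side.
+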
+ def __eq__(self, other):
+ if not isinstance(other, tzrange):
+ return NotImplemented
+
+ return (self._std_abbr == other._std_abbr and
+ self._dst_abbr == other._dst_abbr and
+ self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset and
+ self._start_delta == other._start_delta and
+ self._end_delta == other._end_delta)
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_base_offset_
+
+
+class tzstr(tzrange):
+ """
+ ``tzstr`` objects are time zone objects specified by a time-zone string as
+ it would be passed to a ``TZ`` variable on POSIX-style systems (see
+ the `GNU C Library: TZ Variable`_ for more details).
+
+ There is one notable exception, which is that POSIX-style time zones use an
+ inverted offset format, so normally ``GMT+3`` would be parsed as an offset
+ 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
+ offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
+ behavior, pass a ``True`` value to ``posix_offset``.
+
+ The :class:`tzrange` object provides the same functionality, but is
+    specified using :class:`relativedelta.relativedelta` objects rather than
+ strings.
+
+ :param s:
+ A time zone string in ``TZ`` variable format. This can be a
+ :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
+ or a stream emitting unicode characters (e.g. :class:`StringIO`).
+
+ :param posix_offset:
+ Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
+ ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
+ POSIX standard.
+
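+    A brief illustrative example (assuming the default DST rules described
+    under :class:`tzrange`):
+
+    .. doctest::
+
+        >>> import datetime
+        >>> from dateutil.tz import tzstr
+        >>> tz = tzstr('EST5EDT')
+        >>> tz.tzname(datetime.datetime(2003, 7, 1))
+        'EDT'
+        >>> tz.tzname(datetime.datetime(2003, 1, 1))
+        'EST'
+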
+ .. _`GNU C Library: TZ Variable`:
+ https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
+ """
+ def __init__(self, s, posix_offset=False):
+ global parser
+ from dateutil import parser
+
+ self._s = s
+
+ res = parser._parsetz(s)
+ if res is None:
+ raise ValueError("unknown string format")
+
+ # Here we break the compatibility with the TZ variable handling.
+ # GMT-3 actually *means* the timezone -3.
+ if res.stdabbr in ("GMT", "UTC") and not posix_offset:
+ res.stdoffset *= -1
+
+ # We must initialize it first, since _delta() needs
+ # _std_offset and _dst_offset set. Use False in start/end
+ # to avoid building it two times.
+ tzrange.__init__(self, res.stdabbr, res.stdoffset,
+ res.dstabbr, res.dstoffset,
+ start=False, end=False)
+
+ if not res.dstabbr:
+ self._start_delta = None
+ self._end_delta = None
+ else:
+ self._start_delta = self._delta(res.start)
+ if self._start_delta:
+ self._end_delta = self._delta(res.end, isend=1)
+
+ self.hasdst = bool(self._start_delta)
+
+ def _delta(self, x, isend=0):
+ from dateutil import relativedelta
+ kwargs = {}
+ if x.month is not None:
+ kwargs["month"] = x.month
+ if x.weekday is not None:
+ kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
+ if x.week > 0:
+ kwargs["day"] = 1
+ else:
+ kwargs["day"] = 31
+ elif x.day:
+ kwargs["day"] = x.day
+ elif x.yday is not None:
+ kwargs["yearday"] = x.yday
+ elif x.jyday is not None:
+ kwargs["nlyearday"] = x.jyday
+ if not kwargs:
+ # Default is to start on first sunday of april, and end
+ # on last sunday of october.
+ if not isend:
+ kwargs["month"] = 4
+ kwargs["day"] = 1
+ kwargs["weekday"] = relativedelta.SU(+1)
+ else:
+ kwargs["month"] = 10
+ kwargs["day"] = 31
+ kwargs["weekday"] = relativedelta.SU(-1)
+ if x.time is not None:
+ kwargs["seconds"] = x.time
+ else:
+ # Default is 2AM.
+ kwargs["seconds"] = 7200
+ if isend:
+ # Convert to standard time, to follow the documented way
+ # of working with the extra hour. See the documentation
+ # of the tzinfo class.
+ delta = self._dst_offset - self._std_offset
+ kwargs["seconds"] -= delta.seconds + delta.days * 86400
+ return relativedelta.relativedelta(**kwargs)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._s))
+
+
+class _tzicalvtzcomp(object):
+ def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
+ tzname=None, rrule=None):
+ self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
+ self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
+ self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
+ self.isdst = isdst
+ self.tzname = tzname
+ self.rrule = rrule
+
+
+class _tzicalvtz(_tzinfo):
+ def __init__(self, tzid, comps=[]):
+ super(_tzicalvtz, self).__init__()
+
+ self._tzid = tzid
+ self._comps = comps
+ self._cachedate = []
+ self._cachecomp = []
+
+ def _find_comp(self, dt):
+ if len(self._comps) == 1:
+ return self._comps[0]
+
+ dt = dt.replace(tzinfo=None)
+
+ try:
+ return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))]
+ except ValueError:
+ pass
+
+ lastcompdt = None
+ lastcomp = None
+
+ for comp in self._comps:
+ compdt = self._find_compdt(comp, dt)
+
+ if compdt and (not lastcompdt or lastcompdt < compdt):
+ lastcompdt = compdt
+ lastcomp = comp
+
+ if not lastcomp:
+ # RFC says nothing about what to do when a given
+ # time is before the first onset date. We'll look for the
+ # first standard component, or the first component, if
+ # none is found.
+ for comp in self._comps:
+ if not comp.isdst:
+ lastcomp = comp
+ break
+ else:
+                lastcomp = self._comps[0]
+
+ self._cachedate.insert(0, (dt, self._fold(dt)))
+ self._cachecomp.insert(0, lastcomp)
+
+ if len(self._cachedate) > 10:
+ self._cachedate.pop()
+ self._cachecomp.pop()
+
+ return lastcomp
+
+ def _find_compdt(self, comp, dt):
+ if comp.tzoffsetdiff < ZERO and self._fold(dt):
+ dt -= comp.tzoffsetdiff
+
+ compdt = comp.rrule.before(dt, inc=True)
+
+ return compdt
+
+ def utcoffset(self, dt):
+ if dt is None:
+ return None
+
+ return self._find_comp(dt).tzoffsetto
+
+ def dst(self, dt):
+ comp = self._find_comp(dt)
+ if comp.isdst:
+ return comp.tzoffsetdiff
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return self._find_comp(dt).tzname
+
+ def __repr__(self):
+ return "" % repr(self._tzid)
+
+ __reduce__ = object.__reduce__
+
+
+class tzical(object):
+ """
+ This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
+ as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
+
+ :param `fileobj`:
+ A file or stream in iCalendar format, which should be UTF-8 encoded
+ with CRLF endings.
+
+ .. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
+ """
+ def __init__(self, fileobj):
+ global rrule
+ from dateutil import rrule
+
+ if isinstance(fileobj, string_types):
+ self._s = fileobj
+ # ical should be encoded in UTF-8 with CRLF
+ fileobj = open(fileobj, 'r')
+ file_opened_here = True
+ else:
+ self._s = getattr(fileobj, 'name', repr(fileobj))
+ fileobj = _ContextWrapper(fileobj)
+
+ self._vtz = {}
+
+ with fileobj as fobj:
+ self._parse_rfc(fobj.read())
+
+ def keys(self):
+ """
+ Retrieves the available time zones as a list.
+ """
+ return list(self._vtz.keys())
+
+ def get(self, tzid=None):
+ """
+ Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
+
+ :param tzid:
+ If there is exactly one time zone available, omitting ``tzid``
+ or passing :py:const:`None` value returns it. Otherwise a valid
+ key (which can be retrieved from :func:`keys`) is required.
+
+ :raises ValueError:
+ Raised if ``tzid`` is not specified but there are either more
+            or fewer than one zone defined.
+
+ :returns:
+ Returns either a :py:class:`datetime.tzinfo` object representing
+ the relevant time zone or :py:const:`None` if the ``tzid`` was
+ not found.
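+
+        A minimal illustrative sketch (``example.ics`` is a hypothetical
+        file containing a single ``VTIMEZONE`` block)::
+
+            tz = tzical('example.ics').get()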
+ """
+ if tzid is None:
+ if len(self._vtz) == 0:
+ raise ValueError("no timezones defined")
+ elif len(self._vtz) > 1:
+ raise ValueError("more than one timezone available")
+ tzid = next(iter(self._vtz))
+
+ return self._vtz.get(tzid)
+
+ def _parse_offset(self, s):
+ s = s.strip()
+ if not s:
+ raise ValueError("empty offset")
+ if s[0] in ('+', '-'):
+ signal = (-1, +1)[s[0] == '+']
+ s = s[1:]
+ else:
+ signal = +1
+ if len(s) == 4:
+ return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
+ elif len(s) == 6:
+ return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
+ else:
+ raise ValueError("invalid offset: " + s)
+
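+    # Illustrative examples (offsets are returned in signed seconds):
+    #   _parse_offset('-0500')   -> -18000
+    #   _parse_offset('+013045') ->   5445
+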
+ def _parse_rfc(self, s):
+ lines = s.splitlines()
+ if not lines:
+ raise ValueError("empty string")
+
+ # Unfold
+ i = 0
+ while i < len(lines):
+ line = lines[i].rstrip()
+ if not line:
+ del lines[i]
+ elif i > 0 and line[0] == " ":
+ lines[i-1] += line[1:]
+ del lines[i]
+ else:
+ i += 1
+
+ tzid = None
+ comps = []
+ invtz = False
+ comptype = None
+ for line in lines:
+ if not line:
+ continue
+ name, value = line.split(':', 1)
+ parms = name.split(';')
+ if not parms:
+ raise ValueError("empty property name")
+ name = parms[0].upper()
+ parms = parms[1:]
+ if invtz:
+ if name == "BEGIN":
+ if value in ("STANDARD", "DAYLIGHT"):
+ # Process component
+ pass
+ else:
+ raise ValueError("unknown component: "+value)
+ comptype = value
+ founddtstart = False
+ tzoffsetfrom = None
+ tzoffsetto = None
+ rrulelines = []
+ tzname = None
+ elif name == "END":
+ if value == "VTIMEZONE":
+ if comptype:
+ raise ValueError("component not closed: "+comptype)
+ if not tzid:
+ raise ValueError("mandatory TZID not found")
+ if not comps:
+ raise ValueError(
+ "at least one component is needed")
+ # Process vtimezone
+ self._vtz[tzid] = _tzicalvtz(tzid, comps)
+ invtz = False
+ elif value == comptype:
+ if not founddtstart:
+ raise ValueError("mandatory DTSTART not found")
+ if tzoffsetfrom is None:
+ raise ValueError(
+ "mandatory TZOFFSETFROM not found")
+ if tzoffsetto is None:
+ raise ValueError(
+ "mandatory TZOFFSETFROM not found")
+ # Process component
+ rr = None
+ if rrulelines:
+ rr = rrule.rrulestr("\n".join(rrulelines),
+ compatible=True,
+ ignoretz=True,
+ cache=True)
+ comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
+ (comptype == "DAYLIGHT"),
+ tzname, rr)
+ comps.append(comp)
+ comptype = None
+ else:
+ raise ValueError("invalid component end: "+value)
+ elif comptype:
+ if name == "DTSTART":
+ rrulelines.append(line)
+ founddtstart = True
+ elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
+ rrulelines.append(line)
+ elif name == "TZOFFSETFROM":
+ if parms:
+ raise ValueError(
+ "unsupported %s parm: %s " % (name, parms[0]))
+ tzoffsetfrom = self._parse_offset(value)
+ elif name == "TZOFFSETTO":
+ if parms:
+ raise ValueError(
+ "unsupported TZOFFSETTO parm: "+parms[0])
+ tzoffsetto = self._parse_offset(value)
+ elif name == "TZNAME":
+ if parms:
+ raise ValueError(
+ "unsupported TZNAME parm: "+parms[0])
+ tzname = value
+ elif name == "COMMENT":
+ pass
+ else:
+ raise ValueError("unsupported property: "+name)
+ else:
+ if name == "TZID":
+ if parms:
+ raise ValueError(
+ "unsupported TZID parm: "+parms[0])
+ tzid = value
+ elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
+ pass
+ else:
+ raise ValueError("unsupported property: "+name)
+ elif name == "BEGIN" and value == "VTIMEZONE":
+ tzid = None
+ comps = []
+ invtz = True
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._s))
+
+if sys.platform != "win32":
+ TZFILES = ["/etc/localtime", "localtime"]
+ TZPATHS = ["/usr/share/zoneinfo",
+ "/usr/lib/zoneinfo",
+ "/usr/share/lib/zoneinfo",
+ "/etc/zoneinfo"]
+else:
+ TZFILES = []
+ TZPATHS = []
+
+
+def gettz(name=None):
+ tz = None
+ if not name:
+ try:
+ name = os.environ["TZ"]
+ except KeyError:
+ pass
+ if name is None or name == ":":
+ for filepath in TZFILES:
+ if not os.path.isabs(filepath):
+ filename = filepath
+ for path in TZPATHS:
+ filepath = os.path.join(path, filename)
+ if os.path.isfile(filepath):
+ break
+ else:
+ continue
+ if os.path.isfile(filepath):
+ try:
+ tz = tzfile(filepath)
+ break
+ except (IOError, OSError, ValueError):
+ pass
+ else:
+ tz = tzlocal()
+ else:
+ if name.startswith(":"):
+            name = name[1:]  # strip the leading colon, e.g. ":US/Eastern"
+ if os.path.isabs(name):
+ if os.path.isfile(name):
+ tz = tzfile(name)
+ else:
+ tz = None
+ else:
+ for path in TZPATHS:
+ filepath = os.path.join(path, name)
+ if not os.path.isfile(filepath):
+ filepath = filepath.replace(' ', '_')
+ if not os.path.isfile(filepath):
+ continue
+ try:
+ tz = tzfile(filepath)
+ break
+ except (IOError, OSError, ValueError):
+ pass
+ else:
+ tz = None
+ if tzwin is not None:
+ try:
+ tz = tzwin(name)
+ except WindowsError:
+ tz = None
+
+ if not tz:
+ from dateutil.zoneinfo import get_zonefile_instance
+ tz = get_zonefile_instance().get(name)
+
+ if not tz:
+ for c in name:
+ # name must have at least one offset to be a tzstr
+ if c in "0123456789":
+ try:
+ tz = tzstr(name)
+ except ValueError:
+ pass
+ break
+ else:
+ if name in ("GMT", "UTC"):
+ tz = tzutc()
+ elif name in time.tzname:
+ tz = tzlocal()
+ return tz
+
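+# Illustrative note on gettz's lookup order (behavior depends on the host's
+# installed tzdata, so no concrete output is shown): a name such as
+# 'America/New_York' is resolved against TZPATHS as a tzfile first, then
+# against dateutil's bundled zoneinfo database, then parsed as a TZ string
+# if it contains a digit, and finally matched against "GMT"/"UTC" or the
+# local zone abbreviations. gettz(None) honors the TZ environment variable
+# or falls back to tzlocal().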
+
+def datetime_exists(dt, tz=None):
+ """
+    Given a datetime and a time zone, determine whether or not that datetime
+    would fall in a gap.
+
+ :param dt:
+ A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+ is provided.)
+
+ :param tz:
+ A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+ ``None`` or not provided, the datetime's own time zone will be used.
+
+ :return:
+ Returns a boolean value whether or not the "wall time" exists in ``tz``.
+ """
+ if tz is None:
+ if dt.tzinfo is None:
+ raise ValueError('Datetime is naive and no time zone provided.')
+ tz = dt.tzinfo
+
+ dt = dt.replace(tzinfo=None)
+
+ # This is essentially a test of whether or not the datetime can survive
+ # a round trip to UTC.
+ dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz)
+ dt_rt = dt_rt.replace(tzinfo=None)
+
+ return dt == dt_rt
+
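+# Illustrative sketch (assumes zone data for US/Eastern is available):
+# 2:30 AM never occurs on the 2018 spring-forward date, so the round trip
+# through UTC lands on a different wall time.
+#
+#     >>> dt = datetime.datetime(2018, 3, 11, 2, 30)
+#     >>> datetime_exists(dt, gettz('America/New_York'))
+#     False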
+
+def datetime_ambiguous(dt, tz=None):
+ """
+    Given a datetime and a time zone, determine whether or not that datetime
+    is ambiguous (i.e. if there are two times differentiated only by their DST
+ status).
+
+ :param dt:
+ A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+ is provided.)
+
+ :param tz:
+ A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+ ``None`` or not provided, the datetime's own time zone will be used.
+
+ :return:
+ Returns a boolean value whether or not the "wall time" is ambiguous in
+ ``tz``.
+
+ .. versionadded:: 2.6.0
+ """
+ if tz is None:
+ if dt.tzinfo is None:
+ raise ValueError('Datetime is naive and no time zone provided.')
+
+ tz = dt.tzinfo
+
+ # If a time zone defines its own "is_ambiguous" function, we'll use that.
+ is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
+ if is_ambiguous_fn is not None:
+ try:
+ return tz.is_ambiguous(dt)
+        except Exception:
+ pass
+
+ # If it doesn't come out and tell us it's ambiguous, we'll just check if
+ # the fold attribute has any effect on this particular date and time.
+ dt = dt.replace(tzinfo=tz)
+ wall_0 = enfold(dt, fold=0)
+ wall_1 = enfold(dt, fold=1)
+
+ same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+ same_dst = wall_0.dst() == wall_1.dst()
+
+ return not (same_offset and same_dst)
+
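+# Illustrative sketch (assumes zone data for US/Eastern is available):
+# 1:30 AM occurs twice on the 2018 fall-back date.
+#
+#     >>> dt = datetime.datetime(2018, 11, 4, 1, 30)
+#     >>> datetime_ambiguous(dt, gettz('America/New_York'))
+#     True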
+
+def _datetime_to_timestamp(dt):
+ """
+ Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds
+ since January 1, 1970, ignoring the time zone.
+ """
+ return _total_seconds((dt.replace(tzinfo=None) - EPOCH))
+
+
+class _ContextWrapper(object):
+ """
+ Class for wrapping contexts so that they are passed through in a
+ with statement.
+ """
+ def __init__(self, context):
+ self.context = context
+
+ def __enter__(self):
+ return self.context
+
+    def __exit__(self, *args, **kwargs):
+ pass
+
+# vim:ts=4:sw=4:et
diff --git a/app/lib/dateutil/tz/win.py b/app/lib/dateutil/tz/win.py
new file mode 100644
index 0000000..9f4e551
--- /dev/null
+++ b/app/lib/dateutil/tz/win.py
@@ -0,0 +1,332 @@
+# This code was originally contributed by Jeffrey Harris.
+import datetime
+import struct
+
+from six.moves import winreg
+from six import text_type
+
+try:
+ import ctypes
+ from ctypes import wintypes
+except ValueError:
+ # ValueError is raised on non-Windows systems for some horrible reason.
+ raise ImportError("Running tzwin on non-Windows system")
+
+from ._common import tzname_in_python2, _tzinfo
+from ._common import tzrangebase
+
+__all__ = ["tzwin", "tzwinlocal", "tzres"]
+
+ONEWEEK = datetime.timedelta(7)
+
+TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
+TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
+TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
+
+
+def _settzkeyname():
+ handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+ try:
+ winreg.OpenKey(handle, TZKEYNAMENT).Close()
+ TZKEYNAME = TZKEYNAMENT
+ except WindowsError:
+ TZKEYNAME = TZKEYNAME9X
+ handle.Close()
+ return TZKEYNAME
+
+TZKEYNAME = _settzkeyname()
+
+
+class tzres(object):
+ """
+ Class for accessing `tzres.dll`, which contains timezone name related
+ resources.
+
+ .. versionadded:: 2.5.0
+ """
+ p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
+
+ def __init__(self, tzres_loc='tzres.dll'):
+ # Load the user32 DLL so we can load strings from tzres
+ user32 = ctypes.WinDLL('user32')
+
+ # Specify the LoadStringW function
+ user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
+ wintypes.UINT,
+ wintypes.LPWSTR,
+ ctypes.c_int)
+
+ self.LoadStringW = user32.LoadStringW
+ self._tzres = ctypes.WinDLL(tzres_loc)
+ self.tzres_loc = tzres_loc
+
+ def load_name(self, offset):
+ """
+ Load a timezone name from a DLL offset (integer).
+
+ >>> from dateutil.tzwin import tzres
+ >>> tzr = tzres()
+ >>> print(tzr.load_name(112))
+ 'Eastern Standard Time'
+
+ :param offset:
+ A positive integer value referring to a string from the tzres dll.
+
+        .. note::
+            Offsets found in the registry are generally of the form
+            ``@tzres.dll,-114``. The offset in this case is 114, not -114.
+
+ """
+ resource = self.p_wchar()
+ lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
+ nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
+ return resource[:nchar]
+
+ def name_from_string(self, tzname_str):
+ """
+ Parse strings as returned from the Windows registry into the time zone
+ name as defined in the registry.
+
+ >>> from dateutil.tzwin import tzres
+ >>> tzr = tzres()
+ >>> print(tzr.name_from_string('@tzres.dll,-251'))
+ 'Dateline Daylight Time'
+ >>> print(tzr.name_from_string('Eastern Standard Time'))
+ 'Eastern Standard Time'
+
+ :param tzname_str:
+ A timezone name string as returned from a Windows registry key.
+
+ :return:
+ Returns the localized timezone string from tzres.dll if the string
+ is of the form `@tzres.dll,-offset`, else returns the input string.
+ """
+ if not tzname_str.startswith('@'):
+ return tzname_str
+
+ name_splt = tzname_str.split(',-')
+ try:
+ offset = int(name_splt[1])
+        except (IndexError, ValueError):
+ raise ValueError("Malformed timezone string.")
+
+ return self.load_name(offset)
+
+
+class tzwinbase(tzrangebase):
+ """tzinfo class based on win32's timezones available in the registry."""
+ def __init__(self):
+ raise NotImplementedError('tzwinbase is an abstract base class')
+
+ def __eq__(self, other):
+ # Compare on all relevant dimensions, including name.
+ if not isinstance(other, tzwinbase):
+ return NotImplemented
+
+ return (self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset and
+ self._stddayofweek == other._stddayofweek and
+ self._dstdayofweek == other._dstdayofweek and
+ self._stdweeknumber == other._stdweeknumber and
+ self._dstweeknumber == other._dstweeknumber and
+ self._stdhour == other._stdhour and
+ self._dsthour == other._dsthour and
+ self._stdminute == other._stdminute and
+ self._dstminute == other._dstminute and
+ self._std_abbr == other._std_abbr and
+ self._dst_abbr == other._dst_abbr)
+
+ @staticmethod
+ def list():
+ """Return a list of all time zones known to the system."""
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
+ result = [winreg.EnumKey(tzkey, i)
+ for i in range(winreg.QueryInfoKey(tzkey)[0])]
+ return result
+
+ def display(self):
+ return self._display
+
+ def transitions(self, year):
+ """
+ For a given year, get the DST on and off transition times, expressed
+ always on the standard time side. For zones with no transitions, this
+ function returns ``None``.
+
+ :param year:
+ The year whose transitions you would like to query.
+
+ :return:
+ Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+ ``(dston, dstoff)`` for zones with an annual DST transition, or
+ ``None`` for fixed offset zones.
+ """
+
+ if not self.hasdst:
+ return None
+
+ dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
+ self._dsthour, self._dstminute,
+ self._dstweeknumber)
+
+ dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
+ self._stdhour, self._stdminute,
+ self._stdweeknumber)
+
+ # Ambiguous dates default to the STD side
+ dstoff -= self._dst_base_offset
+
+ return dston, dstoff
+
+ def _get_hasdst(self):
+ return self._dstmonth != 0
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_base_offset_
+
+
+class tzwin(tzwinbase):
+
+ def __init__(self, name):
+ self._name = name
+
+ # multiple contexts only possible in 2.7 and 3.1, we still support 2.6
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ tzkeyname = text_type("{kn}\{name}").format(kn=TZKEYNAME, name=name)
+ with winreg.OpenKey(handle, tzkeyname) as tzkey:
+ keydict = valuestodict(tzkey)
+
+ self._std_abbr = keydict["Std"]
+ self._dst_abbr = keydict["Dlt"]
+
+ self._display = keydict["Display"]
+
+        # See http://ww1.jsiinc.com/SUBA/tip0300/rh0398.htm
+ tup = struct.unpack("=3l16h", keydict["TZI"])
+ stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
+ dstoffset = stdoffset-tup[2] # + DaylightBias * -1
+ self._std_offset = datetime.timedelta(minutes=stdoffset)
+ self._dst_offset = datetime.timedelta(minutes=dstoffset)
+
+ # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
+ # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
+ (self._stdmonth,
+ self._stddayofweek, # Sunday = 0
+ self._stdweeknumber, # Last = 5
+ self._stdhour,
+ self._stdminute) = tup[4:9]
+
+ (self._dstmonth,
+ self._dstdayofweek, # Sunday = 0
+ self._dstweeknumber, # Last = 5
+ self._dsthour,
+ self._dstminute) = tup[12:17]
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = self._get_hasdst()
+
+ def __repr__(self):
+ return "tzwin(%s)" % repr(self._name)
+
+ def __reduce__(self):
+ return (self.__class__, (self._name,))
+
+
+class tzwinlocal(tzwinbase):
+ def __init__(self):
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
+ keydict = valuestodict(tzlocalkey)
+
+ self._std_abbr = keydict["StandardName"]
+ self._dst_abbr = keydict["DaylightName"]
+
+ try:
+            tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
+ sn=self._std_abbr)
+ with winreg.OpenKey(handle, tzkeyname) as tzkey:
+ _keydict = valuestodict(tzkey)
+ self._display = _keydict["Display"]
+ except OSError:
+ self._display = None
+
+ stdoffset = -keydict["Bias"]-keydict["StandardBias"]
+ dstoffset = stdoffset-keydict["DaylightBias"]
+
+ self._std_offset = datetime.timedelta(minutes=stdoffset)
+ self._dst_offset = datetime.timedelta(minutes=dstoffset)
+
+ # For reasons unclear, in this particular key, the day of week has been
+ # moved to the END of the SYSTEMTIME structure.
+ tup = struct.unpack("=8h", keydict["StandardStart"])
+
+ (self._stdmonth,
+ self._stdweeknumber, # Last = 5
+ self._stdhour,
+ self._stdminute) = tup[1:5]
+
+ self._stddayofweek = tup[7]
+
+ tup = struct.unpack("=8h", keydict["DaylightStart"])
+
+ (self._dstmonth,
+ self._dstweeknumber, # Last = 5
+ self._dsthour,
+ self._dstminute) = tup[1:5]
+
+ self._dstdayofweek = tup[7]
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = self._get_hasdst()
+
+ def __repr__(self):
+ return "tzwinlocal()"
+
+ def __str__(self):
+ # str will return the standard name, not the daylight name.
+ return "tzwinlocal(%s)" % repr(self._std_abbr)
+
+ def __reduce__(self):
+ return (self.__class__, ())
+
+
+def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
+ """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
+ first = datetime.datetime(year, month, 1, hour, minute)
+
+    # This works whether dayofweek is an ISO weekday (1-7) or Microsoft-style
+    # (0-6), because 7 % 7 == 0.
+ weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
+ wd = weekdayone + ((whichweek - 1) * ONEWEEK)
+ if (wd.month != month):
+ wd -= ONEWEEK
+
+ return wd
+
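+# Illustrative check: with Microsoft-style numbering (Sunday == 0), the
+# second Sunday of March 2017 at 02:00 resolves to:
+#
+#     >>> picknthweekday(2017, 3, 0, 2, 0, 2)
+#     datetime.datetime(2017, 3, 12, 2, 0)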
+
+def valuestodict(key):
+ """Convert a registry key's values to a dictionary."""
+ dout = {}
+ size = winreg.QueryInfoKey(key)[1]
+ tz_res = None
+
+ for i in range(size):
+ key_name, value, dtype = winreg.EnumValue(key, i)
+ if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
+ # If it's a DWORD (32-bit integer), it's stored as unsigned - convert
+ # that to a proper signed integer
+ if value & (1 << 31):
+ value = value - (1 << 32)
+ elif dtype == winreg.REG_SZ:
+ # If it's a reference to the tzres DLL, load the actual string
+ if value.startswith('@tzres'):
+ tz_res = tz_res or tzres()
+ value = tz_res.name_from_string(value)
+
+ value = value.rstrip('\x00') # Remove trailing nulls
+
+ dout[key_name] = value
+
+ return dout
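The DWORD branch in valuestodict() is a plain two's-complement reinterpretation; the same conversion as a standalone sketch::

    def to_signed32(value):
        # Registry DWORDs arrive as unsigned 32-bit integers; treat the
        # high bit as a sign bit.
        if value & (1 << 31):
            value -= 1 << 32
        return value

    assert to_signed32(0xFFFFFFC4) == -60   # a typical DaylightBias
    assert to_signed32(480) == 480          # positive values pass through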
diff --git a/app/lib/dateutil/tzwin.py b/app/lib/dateutil/tzwin.py
new file mode 100644
index 0000000..55cd910
--- /dev/null
+++ b/app/lib/dateutil/tzwin.py
@@ -0,0 +1,2 @@
+# tzwin has moved to dateutil.tz.win
+from .tz.win import *
\ No newline at end of file
diff --git a/app/lib/dateutil/zoneinfo/__init__.py b/app/lib/dateutil/zoneinfo/__init__.py
new file mode 100644
index 0000000..7145e05
--- /dev/null
+++ b/app/lib/dateutil/zoneinfo/__init__.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+import logging
+import os
+import warnings
+import tempfile
+import shutil
+import json
+
+from tarfile import TarFile
+from pkgutil import get_data
+from io import BytesIO
+from contextlib import closing
+
+from dateutil.tz import tzfile
+
+__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"]
+
+ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
+METADATA_FN = 'METADATA'
+
+# python2.6 compatibility. Note that TarFile.__exit__ != TarFile.close, but
+# it's close enough for python2.6
+tar_open = TarFile.open
+if not hasattr(TarFile, '__exit__'):
+ def tar_open(*args, **kwargs):
+ return closing(TarFile.open(*args, **kwargs))
+
+
+# Subclass the imported tzfile so that instances pickle by filename and are
+# reconstructed through gettz() when unpickled.
+class tzfile(tzfile):
+ def __reduce__(self):
+ return (gettz, (self._filename,))
+
+
+def getzoneinfofile_stream():
+ try:
+ return BytesIO(get_data(__name__, ZONEFILENAME))
+ except IOError as e: # TODO switch to FileNotFoundError?
+ warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
+ return None
+
+
+class ZoneInfoFile(object):
+ def __init__(self, zonefile_stream=None):
+ if zonefile_stream is not None:
+ with tar_open(fileobj=zonefile_stream, mode='r') as tf:
+ # dict comprehension does not work on python2.6
+ # TODO: get back to the nicer syntax when we ditch python2.6
+ # self.zones = {zf.name: tzfile(tf.extractfile(zf),
+ # filename = zf.name)
+ # for zf in tf.getmembers() if zf.isfile()}
+ self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
+ filename=zf.name))
+ for zf in tf.getmembers()
+ if zf.isfile() and zf.name != METADATA_FN)
+ # deal with links: They'll point to their parent object. Less
+ # waste of memory
+ # links = {zl.name: self.zones[zl.linkname]
+ # for zl in tf.getmembers() if zl.islnk() or zl.issym()}
+ links = dict((zl.name, self.zones[zl.linkname])
+ for zl in tf.getmembers() if
+ zl.islnk() or zl.issym())
+ self.zones.update(links)
+ try:
+ metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
+ metadata_str = metadata_json.read().decode('UTF-8')
+ self.metadata = json.loads(metadata_str)
+ except KeyError:
+ # no metadata in tar file
+ self.metadata = None
+ else:
+ self.zones = dict()
+ self.metadata = None
+
+ def get(self, name, default=None):
+ """
+ Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
+ for retrieving zones from the zone dictionary.
+
+ :param name:
+ The name of the zone to retrieve. (Generally IANA zone names)
+
+ :param default:
+ The value to return in the event of a missing key.
+
+ .. versionadded:: 2.6.0
+
+ """
+ return self.zones.get(name, default)
+
+
+# The current API has gettz as a module function, although in fact it taps into
+# a stateful class. So as a workaround for now, without changing the API, we
+# will create a new "global" class instance the first time a user requests a
+# timezone. Ugly, but adheres to the API.
+#
+# TODO: Remove after deprecation period.
+_CLASS_ZONE_INSTANCE = list()
+
+def get_zonefile_instance(new_instance=False):
+ """
+ This is a convenience function which provides a :class:`ZoneInfoFile`
+ instance using the data provided by the ``dateutil`` package. By default, it
+ caches a single instance of the ZoneInfoFile object and returns that.
+
+ :param new_instance:
+ If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
+ used as the cached instance for the next call. Otherwise, new instances
+ are created only as necessary.
+
+ :return:
+ Returns a :class:`ZoneInfoFile` object.
+
+ .. versionadded:: 2.6
+ """
+ if new_instance:
+ zif = None
+ else:
+ zif = getattr(get_zonefile_instance, '_cached_instance', None)
+
+ if zif is None:
+ zif = ZoneInfoFile(getzoneinfofile_stream())
+
+ get_zonefile_instance._cached_instance = zif
+
+ return zif
+
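A short usage sketch of the caching behavior (the zone name is illustrative)::

    from dateutil.zoneinfo import get_zonefile_instance

    zif = get_zonefile_instance()           # built once, then cached
    assert zif is get_zonefile_instance()   # the same object is reused
    nyc = zif.get('America/New_York')       # a tzfile instance, or None
    meta = zif.metadata                     # tzdata release info, or None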
+def gettz(name):
+ """
+ This retrieves a time zone from the local zoneinfo tarball that is packaged
+ with dateutil.
+
+ :param name:
+ An IANA-style time zone name, as found in the zoneinfo file.
+
+ :return:
+ Returns a :class:`dateutil.tz.tzfile` time zone object.
+
+ .. warning::
+ It is generally inadvisable to use this function, and it is only
+ provided for API compatibility with earlier versions. This is *not*
+ equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
+ time zone based on the inputs, favoring system zoneinfo. This is ONLY
+ for accessing the dateutil-specific zoneinfo (which may be out of
+ date compared to the system zoneinfo).
+
+ .. deprecated:: 2.6
+ If you need to use a specific zoneinfo file over the system zoneinfo,
+ instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
+ :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
+
+ Use :func:`get_zonefile_instance` to retrieve an instance of the
+ dateutil-provided zoneinfo.
+ """
+ warnings.warn("zoneinfo.gettz() will be removed in future versions, "
+ "to use the dateutil-provided zoneinfo files, instantiate a "
+ "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
+ "instead. See the documentation for details.",
+ DeprecationWarning)
+
+ if len(_CLASS_ZONE_INSTANCE) == 0:
+ _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+ return _CLASS_ZONE_INSTANCE[0].zones.get(name)
+
+
+def gettz_db_metadata():
+ """ Get the zonefile metadata
+
+ See `zonefile_metadata`_
+
+ :returns:
+ A dictionary with the database metadata
+
+ .. deprecated:: 2.6
+ See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
+ query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
+ """
+ warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
+ "versions, to use the dateutil-provided zoneinfo files, "
+ "ZoneInfoFile object and query the 'metadata' attribute "
+ "instead. See the documentation for details.",
+ DeprecationWarning)
+
+ if len(_CLASS_ZONE_INSTANCE) == 0:
+ _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+ return _CLASS_ZONE_INSTANCE[0].metadata
+
+
diff --git a/app/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/app/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz
new file mode 100644
index 0000000..1d15597
Binary files /dev/null and b/app/lib/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ
diff --git a/app/lib/dateutil/zoneinfo/rebuild.py b/app/lib/dateutil/zoneinfo/rebuild.py
new file mode 100644
index 0000000..a66c1d9
--- /dev/null
+++ b/app/lib/dateutil/zoneinfo/rebuild.py
@@ -0,0 +1,51 @@
+import logging
+import os
+import tempfile
+import shutil
+import json
+from subprocess import check_call
+
+from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME
+
+
+def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
+ """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
+
+ filename is the timezone tarball from ftp.iana.org/tz.
+
+ """
+ tmpdir = tempfile.mkdtemp()
+ zonedir = os.path.join(tmpdir, "zoneinfo")
+ moduledir = os.path.dirname(__file__)
+ try:
+ with tar_open(filename) as tf:
+ for name in zonegroups:
+ tf.extract(name, tmpdir)
+ filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
+ try:
+ check_call(["zic", "-d", zonedir] + filepaths)
+ except OSError as e:
+ _print_on_nosuchfile(e)
+ raise
+ # write metadata file
+ with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
+ json.dump(metadata, f, indent=4, sort_keys=True)
+ target = os.path.join(moduledir, ZONEFILENAME)
+ with tar_open(target, "w:%s" % format) as tf:
+ for entry in os.listdir(zonedir):
+ entrypath = os.path.join(zonedir, entry)
+ tf.add(entrypath, entry)
+ finally:
+ shutil.rmtree(tmpdir)
+
+def _print_on_nosuchfile(e):
+ """Print helpful troubleshooting message
+
+ e is an exception raised by subprocess.check_call()
+
+ """
+ if e.errno == 2:  # errno.ENOENT: the zic binary was not found
+ logging.error(
+ "Could not find zic. Perhaps you need to install "
+ "libc-bin or some other package that provides it, "
+ "or it's not in your PATH?")
diff --git a/app/lib/flask/__init__.py b/app/lib/flask/__init__.py
new file mode 100644
index 0000000..2fcb356
--- /dev/null
+++ b/app/lib/flask/__init__.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""
+ flask
+ ~~~~~
+
+ A microframework based on Werkzeug. It's extensively documented
+ and follows best practice patterns.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+__version__ = '0.12.1'
+
+# utilities we import from Werkzeug and Jinja2 that are unused
+# in the module but are exported as public interface.
+from werkzeug.exceptions import abort
+from werkzeug.utils import redirect
+from jinja2 import Markup, escape
+
+from .app import Flask, Request, Response
+from .config import Config
+from .helpers import url_for, flash, send_file, send_from_directory, \
+ get_flashed_messages, get_template_attribute, make_response, safe_join, \
+ stream_with_context
+from .globals import current_app, g, request, session, _request_ctx_stack, \
+ _app_ctx_stack
+from .ctx import has_request_context, has_app_context, \
+ after_this_request, copy_current_request_context
+from .blueprints import Blueprint
+from .templating import render_template, render_template_string
+
+# the signals
+from .signals import signals_available, template_rendered, request_started, \
+ request_finished, got_request_exception, request_tearing_down, \
+ appcontext_tearing_down, appcontext_pushed, \
+ appcontext_popped, message_flashed, before_render_template
+
+# We're not exposing the actual json module but a convenient wrapper around
+# it.
+from . import json
+
+# This was the only thing that Flask used to export at one point and it had
+# a more generic name.
+jsonify = json.jsonify
+
+# backwards compat, goes away in 1.0
+from .sessions import SecureCookieSession as Session
+json_available = True
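In practice these re-exports mean application code imports everything from the top-level flask package; a minimal sketch::

    from flask import Flask, jsonify, request

    app = Flask(__name__)

    @app.route('/echo')
    def echo():
        # request and jsonify come straight from the re-exports above
        return jsonify(args=request.args.to_dict())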
diff --git a/app/lib/flask/__main__.py b/app/lib/flask/__main__.py
new file mode 100644
index 0000000..cbefccd
--- /dev/null
+++ b/app/lib/flask/__main__.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.__main__
+ ~~~~~~~~~~~~~~
+
+ Alias for flask.run for the command line.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+if __name__ == '__main__':
+ from .cli import main
+ main(as_module=True)
diff --git a/app/lib/flask/_compat.py b/app/lib/flask/_compat.py
new file mode 100644
index 0000000..071628f
--- /dev/null
+++ b/app/lib/flask/_compat.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+"""
+ flask._compat
+ ~~~~~~~~~~~~~
+
+ Some py2/py3 compatibility support based on a stripped down
+ version of six so we don't have to depend on a specific version
+ of it.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+_identity = lambda x: x
+
+
+if not PY2:
+ text_type = str
+ string_types = (str,)
+ integer_types = (int,)
+
+ iterkeys = lambda d: iter(d.keys())
+ itervalues = lambda d: iter(d.values())
+ iteritems = lambda d: iter(d.items())
+
+ from io import StringIO
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+ implements_to_string = _identity
+
+else:
+ text_type = unicode
+ string_types = (str, unicode)
+ integer_types = (int, long)
+
+ iterkeys = lambda d: d.iterkeys()
+ itervalues = lambda d: d.itervalues()
+ iteritems = lambda d: d.iteritems()
+
+ from cStringIO import StringIO
+
+ exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+
+ def implements_to_string(cls):
+ cls.__unicode__ = cls.__str__
+ cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+ return cls
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a
+ # dummy metaclass for one level of class instantiation that replaces
+ # itself with the actual metaclass.
+ class metaclass(type):
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
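A small sketch of the trick in action; the real metaclass runs exactly once, for the real class, on both Python lines::

    class Meta(type):
        def __new__(mcls, name, bases, d):
            d.setdefault('tag', name.lower())
            return super(Meta, mcls).__new__(mcls, name, bases, d)

    class Base(with_metaclass(Meta, object)):
        pass

    assert Base.tag == 'base'
    assert type(Base) is Meta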
+
+# Certain versions of pypy have a bug where clearing the exception stack
+# breaks the __exit__ function in a very peculiar way. The second level of
+# exception blocks is necessary because pypy seems to forget to check if an
+# exception happened until the next bytecode instruction?
+#
+# Relevant PyPy bugfix commit:
+# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301
+# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later
+# versions.
+#
+# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug.
+BROKEN_PYPY_CTXMGR_EXIT = False
+if hasattr(sys, 'pypy_version_info'):
+ class _Mgr(object):
+ def __enter__(self):
+ return self
+ def __exit__(self, *args):
+ if hasattr(sys, 'exc_clear'):
+ # Python 3 (PyPy3) doesn't have exc_clear
+ sys.exc_clear()
+ try:
+ try:
+ with _Mgr():
+ raise AssertionError()
+ except:
+ raise
+ except TypeError:
+ BROKEN_PYPY_CTXMGR_EXIT = True
+ except AssertionError:
+ pass
diff --git a/src/lib/flask/app.py b/app/lib/flask/app.py
similarity index 80%
rename from src/lib/flask/app.py
rename to app/lib/flask/app.py
index 98ecb10..1404e17 100644
--- a/src/lib/flask/app.py
+++ b/app/lib/flask/app.py
@@ -5,10 +5,9 @@
This module implements the central WSGI application object.
- :copyright: (c) 2011 by Armin Ronacher.
+ :copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
-
import os
import sys
from threading import Lock
@@ -19,17 +18,17 @@
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
- MethodNotAllowed, BadRequest
+ MethodNotAllowed, BadRequest, default_exceptions
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
- locked_cached_property, _endpoint_from_view_func, find_package
-from . import json
+ locked_cached_property, _endpoint_from_view_func, find_package, \
+ get_debug_flag
+from . import json, cli
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
-from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
@@ -39,6 +38,9 @@
# a lock used for logger initialization
_logger_lock = Lock()
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
def _make_timedelta(value):
if not isinstance(value, timedelta):
@@ -72,21 +74,21 @@ class Flask(_PackageBoundObject):
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
- an `__init__.py` file inside) or a standard module (just a `.py` file).
+ an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
- in the `__init__.py` file of your package like this::
+ in the :file:`__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
- The idea of the first parameter is to give Flask an idea what
+ The idea of the first parameter is to give Flask an idea of what
belongs to your application. This name is used to find resources
- on the file system, can be used by extensions to improve debugging
+ on the filesystem, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
@@ -94,7 +96,7 @@ class Flask(_PackageBoundObject):
using a package, it's usually recommended to hardcode the name of
your package there.
- For example if your application is defined in `yourapplication/app.py`
+ For example if your application is defined in :file:`yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
@@ -118,6 +120,9 @@ class Flask(_PackageBoundObject):
The `instance_path` and `instance_relative_config` parameters were
added.
+ .. versionadded:: 0.11
+ The `root_path` parameter was added.
+
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
@@ -133,10 +138,15 @@ class Flask(_PackageBoundObject):
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
- :param instance_relative_config: if set to `True` relative filenames
+ :param instance_relative_config: if set to ``True`` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
+ :param root_path: Flask by default will automatically calculate the path
+ to the root of the application. In certain situations
+ this cannot be achieved (for instance if the package
+ is a Python 3 namespace package) and needs to be
+ manually defined.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
@@ -147,18 +157,23 @@ class Flask(_PackageBoundObject):
#: :class:`~flask.Response` for more information.
response_class = Response
+ #: The class that is used for the Jinja environment.
+ #:
+ #: .. versionadded:: 0.11
+ jinja_environment = Environment
+
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
- #: 3. Return None instead of AttributeError on expected attributes.
+ #: 3. Return None instead of AttributeError on unexpected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
- #: flask.g object is not application context scoped.
+ #: flask.g object is now application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
@@ -175,16 +190,27 @@ def _set_request_globals_class(self, value):
_set_request_globals_class)
del _get_request_globals_class, _set_request_globals_class
- #: The debug flag. Set this to `True` to enable debugging of the
+ #: The class that is used for the ``config`` attribute of this app.
+ #: Defaults to :class:`~flask.Config`.
+ #:
+ #: Example use cases for a custom class:
+ #:
+ #: 1. Default values for certain config options.
+ #: 2. Access to config values through attributes in addition to keys.
+ #:
+ #: .. versionadded:: 0.11
+ config_class = Config
+
+ #: The debug flag. Set this to ``True`` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
- #: This attribute can also be configured from the config with the `DEBUG`
- #: configuration key. Defaults to `False`.
+ #: This attribute can also be configured from the config with the ``DEBUG``
+ #: configuration key. Defaults to ``False``.
debug = ConfigAttribute('DEBUG')
- #: The testing flag. Set this to `True` to enable the test mode of
+ #: The testing flag. Set this to ``True`` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
@@ -193,7 +219,7 @@ def _set_request_globals_class(self, value):
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
- #: `TESTING` configuration key. Defaults to `False`.
+ #: ``TESTING`` configuration key. Defaults to ``False``.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
@@ -201,13 +227,13 @@ def _set_request_globals_class(self, value):
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
- #: `SECRET_KEY` configuration key. Defaults to `None`.
+ #: ``SECRET_KEY`` configuration key. Defaults to ``None``.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
- #: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
+ #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
@@ -215,11 +241,21 @@ def _set_request_globals_class(self, value):
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
- #: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
+ #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
+ #: A :class:`~datetime.timedelta` which is used as default cache_timeout
+ #: for the :func:`send_file` functions. The default is 12 hours.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration
+ #: variable can also be set with an integer value used as seconds.
+ #: Defaults to ``timedelta(hours=12)``
+ send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT',
+ get_converter=_make_timedelta)
+
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
@@ -227,7 +263,7 @@ def _set_request_globals_class(self, value):
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
- #: `USE_X_SENDFILE` configuration key. Defaults to `False`.
+ #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
@@ -236,23 +272,6 @@ def _set_request_globals_class(self, value):
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
- #: Enable the deprecated module support? This is active by default
- #: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules
- #: will be removed in favor of Blueprints
- enable_modules = True
-
- #: The logging format used for the debug logger. This is only used when
- #: the application is in debug mode, otherwise the attached logging
- #: handler does the formatting.
- #:
- #: .. versionadded:: 0.3
- debug_log_format = (
- '-' * 80 + '\n' +
- '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
- '%(message)s\n' +
- '-' * 80
- )
-
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
@@ -270,7 +289,7 @@ def _set_request_globals_class(self, value):
#: Default configuration parameters.
default_config = ImmutableDict({
- 'DEBUG': False,
+ 'DEBUG': get_debug_flag(default=False),
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
@@ -278,6 +297,7 @@ def _set_request_globals_class(self, value):
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
+ 'LOGGER_HANDLER_POLICY': 'always',
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
@@ -285,14 +305,18 @@ def _set_request_globals_class(self, value):
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
+ 'SESSION_REFRESH_EACH_REQUEST': True,
'MAX_CONTENT_LENGTH': None,
- 'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours
+ 'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12),
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
+ 'EXPLAIN_TEMPLATE_LOADING': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
+ 'JSONIFY_MIMETYPE': 'application/json',
+ 'TEMPLATES_AUTO_RELOAD': None,
})
#: The rule object to use for URL rules created. This is used by
@@ -314,9 +338,11 @@ def _set_request_globals_class(self, value):
def __init__(self, import_name, static_path=None, static_url_path=None,
static_folder='static', template_folder='templates',
- instance_path=None, instance_relative_config=False):
+ instance_path=None, instance_relative_config=False,
+ root_path=None):
_PackageBoundObject.__init__(self, import_name,
- template_folder=template_folder)
+ template_folder=template_folder,
+ root_path=root_path)
if static_path is not None:
from warnings import warn
warn(DeprecationWarning('static_path is now called '
@@ -357,11 +383,11 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
# :attr:`error_handler_spec` shall be used now.
self._error_handlers = {}
- #: A dictionary of all registered error handlers. The key is `None`
+ #: A dictionary of all registered error handlers. The key is ``None``
#: for error handlers active on the application, otherwise the key is
#: the name of the blueprint. Each key points to another dictionary
- #: where they key is the status code of the http exception. The
- #: special key `None` points to a list of tuples where the first item
+ #: where the key is the status code of the http exception. The
+ #: special key ``None`` points to a list of tuples where the first item
#: is the class for the instance check and the second the error handler
#: function.
#:
@@ -372,7 +398,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
#: A list of functions that are called when :meth:`url_for` raises a
#: :exc:`~werkzeug.routing.BuildError`. Each function registered here
#: is called with `error`, `endpoint` and `values`. If a function
- #: returns `None` or raises a `BuildError` the next function is
+ #: returns ``None`` or raises a :exc:`BuildError` the next function is
#: tried.
#:
#: .. versionadded:: 0.9
@@ -380,7 +406,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
#: A dictionary with lists of functions that should be called at the
#: beginning of the request. The key of the dictionary is the name of
- #: the blueprint this function is active for, `None` for all requests.
+ #: the blueprint this function is active for, ``None`` for all requests.
#: This can for example be used to open database connections or
#: getting hold of the currently logged in user. To register a
#: function here, use the :meth:`before_request` decorator.
@@ -395,16 +421,15 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
#: A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the blueprint
- #: this function is active for, `None` for all requests. This can for
- #: example be used to open database connections or getting hold of the
- #: currently logged in user. To register a function here, use the
- #: :meth:`after_request` decorator.
+ #: this function is active for, ``None`` for all requests. This can for
+ #: example be used to close database connections. To register a function
+ #: here, use the :meth:`after_request` decorator.
self.after_request_funcs = {}
#: A dictionary with lists of functions that are called after
#: each request, even if an exception has occurred. The key of the
#: dictionary is the name of the blueprint this function is active for,
- #: `None` for all requests. These functions are not allowed to modify
+ #: ``None`` for all requests. These functions are not allowed to modify
#: the request, and their return values are ignored. If an exception
#: occurred while processing the request, it gets passed to each
#: teardown_request function. To register a function here, use the
@@ -424,7 +449,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
#: A dictionary with lists of functions that can be used as URL
#: value processor functions. Whenever a URL is built these functions
#: are called to modify the dictionary of values in place. The key
- #: `None` here is used for application wide
+ #: ``None`` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#:
@@ -432,7 +457,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
self.url_value_preprocessors = {}
#: A dictionary with lists of functions that can be used as URL value
- #: preprocessors. The key `None` here is used for application wide
+ #: preprocessors. The key ``None`` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#: of URL values before they are used as the keyword arguments of the
@@ -445,7 +470,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
#: A dictionary with list of functions that are called without argument
#: to populate the template context. The key of the dictionary is the
- #: name of the blueprint this function is active for, `None` for all
+ #: name of the blueprint this function is active for, ``None`` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
@@ -453,12 +478,19 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
None: [_default_template_ctx_processor]
}
+ #: A list of shell context processor functions that should be run
+ #: when a shell context is created.
+ #:
+ #: .. versionadded:: 0.11
+ self.shell_context_processors = []
+
#: all the attached blueprints in a dictionary by name. Blueprints
#: can be attached multiple times so this dictionary does not tell
#: you how often they got attached.
#:
#: .. versionadded:: 0.7
self.blueprints = {}
+ self._blueprint_order = []
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
@@ -469,8 +501,8 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
#: app.extensions = {}
#: app.extensions['extensionname'] = SomeObject()
#:
- #: The key must match the name of the `flaskext` module. For example in
- #: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be
+ #: The key must match the name of the extension module. For example in
+ #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
@@ -486,7 +518,7 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
- #: return ','.join(BaseConverter.to_url(value)
+ #: return ','.join(super(ListConverter, self).to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
@@ -508,6 +540,14 @@ def __init__(self, import_name, static_path=None, static_url_path=None,
endpoint='static',
view_func=self.send_static_file)
+ #: The click command line context for this application. Commands
+ #: registered here show up in the :command:`flask` command once the
+ #: application has been discovered. The default commands are
+ #: provided by Flask itself and can be overridden.
+ #:
+ #: This is an instance of a :class:`click.Group` object.
+ self.cli = cli.AppGroup(self.name)
+
def _get_error_handlers(self):
from warnings import warn
warn(DeprecationWarning('error_handlers is deprecated, use the '
@@ -538,7 +578,7 @@ def name(self):
@property
def propagate_exceptions(self):
- """Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
+ """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
@@ -550,7 +590,7 @@ def propagate_exceptions(self):
@property
def preserve_context_on_exception(self):
- """Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION`
+ """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
configuration value in case it's set, otherwise a sensible default
is returned.
@@ -590,7 +630,7 @@ def jinja_env(self):
@property
def got_first_request(self):
- """This attribute is set to `True` if the application started
+ """This attribute is set to ``True`` if the application started
handling the first request.
.. versionadded:: 0.8
@@ -609,7 +649,7 @@ def make_config(self, instance_relative=False):
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
- return Config(root_path, self.default_config)
+ return self.config_class(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
@@ -643,11 +683,19 @@ def create_jinja_environment(self):
this function to customize the behavior.
.. versionadded:: 0.5
+ .. versionchanged:: 0.11
+ ``Environment.auto_reload`` set in accordance with
+ ``TEMPLATES_AUTO_RELOAD`` configuration option.
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
- rv = Environment(self, **options)
+ if 'auto_reload' not in options:
+ if self.config['TEMPLATES_AUTO_RELOAD'] is not None:
+ options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD']
+ else:
+ options['auto_reload'] = self.debug
+ rv = self.jinja_environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
@@ -685,13 +733,13 @@ def init_jinja_globals(self):
"""
def select_jinja_autoescape(self, filename):
- """Returns `True` if autoescaping should be active for the given
- template name.
+ """Returns ``True`` if autoescaping should be active for the given
+ template name. If no template name is given, returns ``True``.
.. versionadded:: 0.5
"""
if filename is None:
- return False
+ return True
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
@@ -719,9 +767,26 @@ def update_template_context(self, context):
# existing views.
context.update(orig_ctx)
+ def make_shell_context(self):
+ """Returns the shell context for an interactive shell for this
+ application. This runs all the registered shell context
+ processors.
+
+ .. versionadded:: 0.11
+ """
+ rv = {'app': self, 'g': g}
+ for processor in self.shell_context_processors:
+ rv.update(processor())
+ return rv
+
def run(self, host=None, port=None, debug=None, **options):
- """Runs the application on a local development server. If the
- :attr:`debug` flag is set the server will automatically reload
+ """Runs the application on a local development server.
+
+ Do not use ``run()`` in a production setting. It is not intended to
+ meet security and performance requirements for a production server.
+ Instead, see :ref:`deployment` for WSGI server recommendations.
+
+ If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
@@ -729,13 +794,17 @@ def run(self, host=None, port=None, debug=None, **options):
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
+ It is not recommended to use this function for development with
+ automatic reloading as this is badly supported. Instead you should
+ be using the :command:`flask` command line script's ``run`` support.
+
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
- Setting ``use_debugger`` to `True` without being in debug mode
+ Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
@@ -772,11 +841,11 @@ def run(self, host=None, port=None, debug=None, **options):
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
- # resetted normally. This makes it possible to restart the server
+ # reset normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
- def test_client(self, use_cookies=True):
+ def test_client(self, use_cookies=True, **kwargs):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
@@ -791,28 +860,46 @@ def test_client(self, use_cookies=True):
app.testing = True
client = app.test_client()
- The test client can be used in a `with` block to defer the closing down
- of the context until the end of the `with` block. This is useful if
+ The test client can be used in a ``with`` block to defer the closing down
+ of the context until the end of the ``with`` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
+ Additionally, you may pass optional keyword arguments that will then
+ be passed to the application's :attr:`test_client_class` constructor.
+ For example::
+
+ from flask.testing import FlaskClient
+
+ class CustomClient(FlaskClient):
+ def __init__(self, *args, **kwargs):
+ self._authentication = kwargs.pop("authentication")
+ super(CustomClient, self).__init__(*args, **kwargs)
+
+ app.test_client_class = CustomClient
+ client = app.test_client(authentication='Basic ....')
+
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
- added support for `with` block usage for the client.
+ added support for ``with`` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
+
+ .. versionchanged:: 0.11
+ Added `**kwargs` to support passing additional keyword arguments to
+ the constructor of :attr:`test_client_class`.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
- return cls(self, self.response_class, use_cookies=use_cookies)
+ return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
@@ -844,32 +931,6 @@ def make_null_session(self):
"""
return self.session_interface.make_null_session(self)
- def register_module(self, module, **options):
- """Registers a module with this application. The keyword argument
- of this function are the same as the ones for the constructor of the
- :class:`Module` class and will override the values of the module if
- provided.
-
- .. versionchanged:: 0.7
- The module system was deprecated in favor for the blueprint
- system.
- """
- assert blueprint_is_module(module), 'register_module requires ' \
- 'actual module objects. Please upgrade to blueprints though.'
- if not self.enable_modules:
- raise RuntimeError('Module support was disabled but code '
- 'attempted to register a module named %r' % module)
- else:
- from warnings import warn
- warn(DeprecationWarning('Modules are deprecated. Upgrade to '
- 'using blueprints. Have a look into the documentation for '
- 'more information. If this module was registered by a '
- 'Flask-Extension upgrade the extension or contact the author '
- 'of that extension instead. (Registered %r)' % module),
- stacklevel=2)
-
- self.register_blueprint(module, **options)
-
@setupmethod
def register_blueprint(self, blueprint, **options):
"""Registers a blueprint on the application.
@@ -885,9 +946,17 @@ def register_blueprint(self, blueprint, **options):
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
+ self._blueprint_order.append(blueprint)
first_registration = True
blueprint.register(self, options, first_registration)
+ def iter_blueprints(self):
+ """Iterates over all blueprints by the order they were registered.
+
+ .. versionadded:: 0.11
+ """
+ return iter(self._blueprint_order)
+
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
@@ -921,7 +990,7 @@ def index():
`view_func` parameter added.
.. versionchanged:: 0.6
- `OPTIONS` is added automatically as method.
+ ``OPTIONS`` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
@@ -933,9 +1002,9 @@ def index():
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
- to (`GET`, `POST` etc.). By default a rule
- just listens for `GET` (and implicitly `HEAD`).
- Starting with Flask 0.6, `OPTIONS` is implicitly
+ to (``GET``, ``POST`` etc.). By default a rule
+ just listens for ``GET`` (and implicitly ``HEAD``).
+ Starting with Flask 0.6, ``OPTIONS`` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
@@ -945,10 +1014,13 @@ def index():
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
- # a tuple of only `GET` as default.
+ # a tuple of only ``GET`` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
- methods = set(methods)
+ if isinstance(methods, string_types):
+ raise TypeError('Allowed methods have to be iterables of strings, '
+ 'for example: @app.route(..., methods=["POST"])')
+ methods = set(item.upper() for item in methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
@@ -968,18 +1040,13 @@ def index():
# Add the required methods now.
methods |= required_methods
- # due to a werkzeug bug we need to make sure that the defaults are
- # None if they are an empty dictionary. This should not be necessary
- # with Werkzeug 0.7
- options['defaults'] = options.get('defaults') or None
-
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
- if old_func is not None and old_func is not view_func:
+ if old_func is not None and old_func != view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
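The new string check guards a common mistake; a sketch, assuming an ``app = Flask(__name__)`` as elsewhere in this module::

    def submit():
        return 'ok'

    app.add_url_rule('/submit', 'submit', submit, methods=['POST'])  # fine
    # methods='POST' now raises TypeError instead of silently registering
    # the one-character methods 'P', 'O', 'S' and 'T' via set('POST').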
@@ -1003,9 +1070,9 @@ def index():
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
- to (`GET`, `POST` etc.). By default a rule
- just listens for `GET` (and implicitly `HEAD`).
- Starting with Flask 0.6, `OPTIONS` is implicitly
+ to (``GET``, ``POST`` etc.). By default a rule
+ just listens for ``GET`` (and implicitly ``HEAD``).
+ Starting with Flask 0.6, ``OPTIONS`` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
@@ -1030,9 +1097,24 @@ def decorator(f):
return f
return decorator
+ @staticmethod
+ def _get_exc_class_and_code(exc_class_or_code):
+ """Ensure that we register only exceptions as handler keys"""
+ if isinstance(exc_class_or_code, integer_types):
+ exc_class = default_exceptions[exc_class_or_code]
+ else:
+ exc_class = exc_class_or_code
+
+ assert issubclass(exc_class, Exception)
+
+ if issubclass(exc_class, HTTPException):
+ return exc_class, exc_class.code
+ else:
+ return exc_class, None
+
@setupmethod
def errorhandler(self, code_or_exception):
- """A decorator that is used to register a function give a given
+ """A decorator that is used to register a function given an
error code. Example::
@app.errorhandler(404)
@@ -1057,15 +1139,21 @@ def page_not_found(error):
however is discouraged as it requires fiddling with nested dictionaries
and the special case for arbitrary exception types.
- The first `None` refers to the active blueprint. If the error
- handler should be application wide `None` shall be used.
+ The first ``None`` refers to the active blueprint. If the error
+ handler should be application wide ``None`` shall be used.
+
+ .. versionadded:: 0.7
+ Use :meth:`register_error_handler` instead of modifying
+ :attr:`error_handler_spec` directly, for application wide error
+ handlers.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
- :param code: the code as integer for the handler
+ :param code_or_exception: the code as integer for the handler, or
+ an arbitrary exception
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
@@ -1083,16 +1171,21 @@ def register_error_handler(self, code_or_exception, f):
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
- if isinstance(code_or_exception, HTTPException):
- code_or_exception = code_or_exception.code
- if isinstance(code_or_exception, integer_types):
- assert code_or_exception != 500 or key is None, \
- 'It is currently not possible to register a 500 internal ' \
- 'server error on a per-blueprint level.'
- self.error_handler_spec.setdefault(key, {})[code_or_exception] = f
- else:
- self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \
- .append((code_or_exception, f))
+ """
+ :type key: None|str
+ :type code_or_exception: int|T<=Exception
+ :type f: callable
+ """
+ if isinstance(code_or_exception, HTTPException): # old broken behavior
+ raise ValueError(
+ 'Tried to register a handler for an exception instance {0!r}. '
+ 'Handlers can only be registered for exception classes or HTTP error codes.'
+ .format(code_or_exception))
+
+ exc_class, code = self._get_exc_class_and_code(code_or_exception)
+
+ handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {})
+ handlers[exc_class] = f
@setupmethod
def template_filter(self, name=None):
@@ -1159,7 +1252,6 @@ def add_template_test(self, f, name=None):
"""
self.jinja_env.tests[name or f.__name__] = f
-
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
@@ -1194,7 +1286,13 @@ def add_template_global(self, f, name=None):
@setupmethod
def before_request(self, f):
- """Registers a function to run before each request."""
+ """Registers a function to run before each request.
+
+ The function will be called without any arguments.
+ If the function returns a non-None value, it's handled as
+ if it was the return value from the view and further
+ request handling is stopped.
+ """
self.before_request_funcs.setdefault(None, []).append(f)
return f
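The short-circuit behavior in a sketch, assuming an ``app = Flask(__name__)``::

    from flask import request

    @app.before_request
    def require_token():
        # Returning a non-None value stops dispatch; the view never runs.
        if request.args.get('token') != 'expected-token':
            return 'forbidden', 403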
@@ -1203,15 +1301,21 @@ def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
+ The function will be called without any arguments and its return
+ value is ignored.
+
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
+ return f
@setupmethod
def after_request(self, f):
- """Register a function to be run after each request. Your function
- must take one parameter, a :attr:`response_class` object and return
- a new response object or the same (see :meth:`process_response`).
+ """Register a function to be run after each request.
+
+ Your function must take one parameter, an instance of
+ :attr:`response_class` and return a new response object or the
+ same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
@@ -1246,10 +1350,12 @@ def teardown_request(self, f):
When a teardown function was called because of an exception it will
be passed an error object.
+ The return values of teardown functions are ignored.
+
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
- immediately. Instead if will keep it alive so that the interactive
+ immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
@@ -1280,6 +1386,8 @@ def teardown_appcontext(self, f):
When a teardown function was called because of an exception it will
be passed an error object.
+ The return values of teardown functions are ignored.
+
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
@@ -1291,6 +1399,15 @@ def context_processor(self, f):
self.template_context_processors[None].append(f)
return f
+ @setupmethod
+ def shell_context_processor(self, f):
+ """Registers a shell context processor function.
+
+ .. versionadded:: 0.11
+ """
+ self.shell_context_processors.append(f)
+ return f
+
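A sketch of the new hook; names returned by processors become locals in the interactive shell::

    @app.shell_context_processor
    def provide_context():
        return {'answer': 42}

    ctx = app.make_shell_context()
    assert ctx['answer'] == 42
    assert ctx['app'] is app    # 'app' and 'g' are always present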
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
@@ -1309,6 +1426,33 @@ def url_defaults(self, f):
self.url_default_functions.setdefault(None, []).append(f)
return f
+ def _find_error_handler(self, e):
+ """Finds a registered error handler for the request’s blueprint.
+ Otherwise falls back to the app; returns None if no suitable
+ handler is found.
+ """
+ exc_class, code = self._get_exc_class_and_code(type(e))
+
+ def find_handler(handler_map):
+ if not handler_map:
+ return
+ for cls in exc_class.__mro__:
+ handler = handler_map.get(cls)
+ if handler is not None:
+ # cache for next time exc_class is raised
+ handler_map[exc_class] = handler
+ return handler
+
+ # try blueprint handlers
+ handler = find_handler(self.error_handler_spec
+ .get(request.blueprint, {})
+ .get(code))
+ if handler is not None:
+ return handler
+
+ # fall back to app handlers
+ return find_handler(self.error_handler_spec[None].get(code))
+
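Because the lookup walks ``exc_class.__mro__``, a handler registered for a base class also catches its subclasses; a sketch::

    class AppError(Exception):
        pass

    class DatabaseError(AppError):
        pass

    @app.errorhandler(AppError)
    def handle_app_error(e):
        return 'something went wrong: %s' % e, 500

    # A view raising DatabaseError resolves to handle_app_error, and the
    # resolved handler is then cached under DatabaseError for next time.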
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
@@ -1316,27 +1460,24 @@ def handle_http_exception(self, e):
.. versionadded:: 0.3
"""
- handlers = self.error_handler_spec.get(request.blueprint)
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
- if handlers and e.code in handlers:
- handler = handlers[e.code]
- else:
- handler = self.error_handler_spec[None].get(e.code)
+
+ handler = self._find_error_handler(e)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
- this will return `False` for all exceptions except for a bad request
- key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It
- also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.
+ this will return ``False`` for all exceptions except for a bad request
+ key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
+ also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
- If it returns `True` for any exception the error handler for this
+ If it returns ``True`` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
@@ -1366,19 +1507,15 @@ def handle_user_exception(self, e):
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
+
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
- blueprint_handlers = ()
- handlers = self.error_handler_spec.get(request.blueprint)
- if handlers is not None:
- blueprint_handlers = handlers.get(None, ())
- app_handlers = self.error_handler_spec[None].get(None, ())
- for typecheck, handler in chain(blueprint_handlers, app_handlers):
- if isinstance(e, typecheck):
- return handler(e)
+ handler = self._find_error_handler(e)
- reraise(exc_type, exc_value, tb)
+ if handler is None:
+ reraise(exc_type, exc_value, tb)
+ return handler(e)
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
@@ -1392,7 +1529,7 @@ def handle_exception(self, e):
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
- handler = self.error_handler_spec[None].get(500)
+ handler = self._find_error_handler(InternalServerError())
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
@@ -1407,7 +1544,7 @@ def handle_exception(self, e):
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
- return handler(e)
+ return self.finalize_request(handler(e), from_error_handler=True)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
@@ -1475,9 +1612,30 @@ def full_dispatch_request(self):
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
+ return self.finalize_request(rv)
+
+ def finalize_request(self, rv, from_error_handler=False):
+ """Given the return value from a view function this finalizes
+ the request by converting it into a response and invoking the
+ postprocessing functions. This is invoked for both normal
+ request dispatching as well as error handlers.
+
+ Because this means that it might be called as a result of a
+ failure, a special safe mode is available which can be enabled
+ with the `from_error_handler` flag. If enabled, failures in
+ response processing will be logged and otherwise ignored.
+
+ :internal:
+ """
response = self.make_response(rv)
- response = self.process_response(response)
- request_finished.send(self, response=response)
+ try:
+ response = self.process_response(response)
+ request_finished.send(self, response=response)
+ except Exception:
+ if not from_error_handler:
+ raise
+ self.logger.exception('Request finalizing failed with an '
+ 'error while handling an error')
return response
def try_trigger_before_first_request_functions(self):
@@ -1492,14 +1650,14 @@ def try_trigger_before_first_request_functions(self):
with self._before_request_lock:
if self._got_first_request:
return
- self._got_first_request = True
for func in self.before_first_request_funcs:
func()
+ self._got_first_request = True
def make_default_options_response(self):
- """This method is called to create the default `OPTIONS` response.
+ """This method is called to create the default ``OPTIONS`` response.
This can be changed through subclassing to change the default
- behavior of `OPTIONS` responses.
+ behavior of ``OPTIONS`` responses.
.. versionadded:: 0.7
"""
@@ -1522,7 +1680,7 @@ def make_default_options_response(self):
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
- function returns `True` then the teardown handlers will not be
+ function returns ``True`` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
@@ -1546,9 +1704,10 @@ def make_response(self, rv):
a WSGI function the function is called as WSGI application
and buffered as response object
:class:`tuple` A tuple in the form ``(response, status,
- headers)`` where `response` is any of the
+ headers)`` or ``(response, headers)``
+ where `response` is any of the
types defined here, `status` is a string
- or an integer and `headers` is a list of
+ or an integer and `headers` is a list or
a dictionary with header values.
======================= ===========================================
@@ -1558,29 +1717,33 @@ def make_response(self, rv):
Previously a tuple was interpreted as the arguments for the
response object.
"""
- status = headers = None
+ status_or_headers = headers = None
if isinstance(rv, tuple):
- rv, status, headers = rv + (None,) * (3 - len(rv))
+ rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('View function did not return a response')
+ if isinstance(status_or_headers, (dict, list)):
+ headers, status_or_headers = status_or_headers, None
+
if not isinstance(rv, self.response_class):
# When we create a response object directly, we let the constructor
# set the headers and status. We do this because there can be
# some extra logic involved when creating these objects with
# specific values (like default content type selection).
if isinstance(rv, (text_type, bytes, bytearray)):
- rv = self.response_class(rv, headers=headers, status=status)
- headers = status = None
+ rv = self.response_class(rv, headers=headers,
+ status=status_or_headers)
+ headers = status_or_headers = None
else:
rv = self.response_class.force_type(rv, request.environ)
- if status is not None:
- if isinstance(status, string_types):
- rv.status = status
+ if status_or_headers is not None:
+ if isinstance(status_or_headers, string_types):
+ rv.status = status_or_headers
else:
- rv.status_code = status
+ rv.status_code = status_or_headers
if headers:
rv.headers.extend(headers)
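To make the accepted shapes concrete, here is a minimal sketch of view
functions returning each tuple form (the routes and header names are
illustrative only)::

    @app.route('/text')
    def text():
        return 'hello'                          # body only

    @app.route('/created')
    def created():
        return 'created', 201                   # (response, status)

    @app.route('/tagged')
    def tagged():
        return 'hello', {'X-Tag': 'demo'}       # (response, headers)

    @app.route('/full')
    def full():
        return 'hello', 201, {'X-Tag': 'demo'}  # (response, status, headers)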
@@ -1631,8 +1794,9 @@ def handle_url_build_error(self, error, endpoint, values):
rv = handler(error, endpoint, values)
if rv is not None:
return rv
- except BuildError as error:
- pass
+ except BuildError as e:
+ # make error available outside except block (py3)
+ error = e
# At this point we want to reraise the exception. If the error is
# still the same one we can reraise it with the original traceback,
@@ -1643,12 +1807,13 @@ def handle_url_build_error(self, error, endpoint, values):
def preprocess_request(self):
"""Called before the actual request dispatching and will
- call every as :meth:`before_request` decorated function.
- If any of these function returns a value it's handled as
+ call each :meth:`before_request` decorated function, passing no
+ arguments.
+ If any of these functions returns a value, it's handled as
if it was the return value from the view and further
request handling is stopped.
- This also triggers the :meth:`url_value_processor` functions before
+ This also triggers the :meth:`url_value_preprocessor` functions before
the actual :meth:`before_request` functions are called.
"""
bp = _request_ctx_stack.top.request.blueprint
@@ -1693,7 +1858,7 @@ def process_response(self, response):
self.save_session(ctx.session, response)
return response
- def do_teardown_request(self, exc=None):
+ def do_teardown_request(self, exc=_sentinel):
"""Called after the actual request dispatching and will
call every :meth:`teardown_request` decorated function. This is
not actually called by the :class:`Flask` object itself but is always
@@ -1704,24 +1869,24 @@ def do_teardown_request(self, exc=None):
Added the `exc` argument. Previously this was always using the
current exception information.
"""
- if exc is None:
+ if exc is _sentinel:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
- rv = func(exc)
+ func(exc)
request_tearing_down.send(self, exc=exc)
- def do_teardown_appcontext(self, exc=None):
+ def do_teardown_appcontext(self, exc=_sentinel):
"""Called when an application context is popped. This works pretty
much the same as :meth:`do_teardown_request` but for the application
context.
.. versionadded:: 0.9
"""
- if exc is None:
+ if exc is _sentinel:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
@@ -1745,15 +1910,15 @@ def app_context(self):
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
- combination with the `with` statement because the request is only bound
- to the current context for the duration of the `with` block.
+ combination with the ``with`` statement because the request is only bound
+ to the current context for the duration of the ``with`` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
- The object returned can also be used without the `with` statement
+ The object returned can also be used without the ``with`` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
@@ -1765,7 +1930,7 @@ def request_context(self, environ):
ctx.pop()
.. versionchanged:: 0.3
- Added support for non-with statement usage and `with` statement
+ Added support for non-with statement usage and ``with`` statement
is now passed the ctx object.
:param environ: a WSGI environment
@@ -1774,7 +1939,7 @@ def request_context(self, environ):
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
- :func:`werkzeug.test.EnvironBuilder` for more information, this
+ :class:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
"""
from flask.testing import make_test_environ_builder
@@ -1817,20 +1982,16 @@ def wsgi_app(self, environ, start_response):
response = self.full_dispatch_request()
except Exception as e:
error = e
- response = self.make_response(self.handle_exception(e))
+ response = self.handle_exception(e)
+ except:
+ error = sys.exc_info()[1]
+ raise
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
- @property
- def modules(self):
- from warnings import warn
- warn(DeprecationWarning('Flask.modules is deprecated, use '
- 'Flask.blueprints instead'), stacklevel=2)
- return self.blueprints
-
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
diff --git a/app/lib/flask/blueprints.py b/app/lib/flask/blueprints.py
new file mode 100644
index 0000000..586a1b0
--- /dev/null
+++ b/app/lib/flask/blueprints.py
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.blueprints
+ ~~~~~~~~~~~~~~~~
+
+ Blueprints are the recommended way to implement larger or more
+ pluggable applications in Flask 0.7 and later.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+from functools import update_wrapper
+
+from .helpers import _PackageBoundObject, _endpoint_from_view_func
+
+
+class BlueprintSetupState(object):
+ """Temporary holder object for registering a blueprint with the
+ application. An instance of this class is created by the
+ :meth:`~flask.Blueprint.make_setup_state` method and later passed
+ to all register callback functions.
+ """
+
+ def __init__(self, blueprint, app, options, first_registration):
+ #: a reference to the current application
+ self.app = app
+
+ #: a reference to the blueprint that created this setup state.
+ self.blueprint = blueprint
+
+ #: a dictionary with all options that were passed to the
+ #: :meth:`~flask.Flask.register_blueprint` method.
+ self.options = options
+
+ #: blueprints can be registered multiple times with the
+ #: application, but not everything should be registered more than
+ #: once on it; this attribute can be used to figure out whether
+ #: the blueprint was already registered in the past.
+ self.first_registration = first_registration
+
+ subdomain = self.options.get('subdomain')
+ if subdomain is None:
+ subdomain = self.blueprint.subdomain
+
+ #: The subdomain that the blueprint should be active for, ``None``
+ #: otherwise.
+ self.subdomain = subdomain
+
+ url_prefix = self.options.get('url_prefix')
+ if url_prefix is None:
+ url_prefix = self.blueprint.url_prefix
+
+ #: The prefix that should be used for all URLs defined on the
+ #: blueprint.
+ self.url_prefix = url_prefix
+
+ #: A dictionary with URL defaults that is added to each and every
+ #: URL that was defined with the blueprint.
+ self.url_defaults = dict(self.blueprint.url_values_defaults)
+ self.url_defaults.update(self.options.get('url_defaults', ()))
+
+ def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
+ """A helper method to register a rule (and optionally a view function)
+ to the application. The endpoint is automatically prefixed with the
+ blueprint's name.
+ """
+ if self.url_prefix:
+ rule = self.url_prefix + rule
+ options.setdefault('subdomain', self.subdomain)
+ if endpoint is None:
+ endpoint = _endpoint_from_view_func(view_func)
+ defaults = self.url_defaults
+ if 'defaults' in options:
+ defaults = dict(defaults, **options.pop('defaults'))
+ self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
+ view_func, defaults=defaults, **options)
+
+
+class Blueprint(_PackageBoundObject):
+ """Represents a blueprint. A blueprint is an object that records
+ functions that will be called with the
+ :class:`~flask.blueprints.BlueprintSetupState` later to register functions
+ or other things on the main application. See :ref:`blueprints` for more
+ information.
+
+ .. versionadded:: 0.7
+ """
+
+ warn_on_modifications = False
+ _got_registered_once = False
+
+ def __init__(self, name, import_name, static_folder=None,
+ static_url_path=None, template_folder=None,
+ url_prefix=None, subdomain=None, url_defaults=None,
+ root_path=None):
+ _PackageBoundObject.__init__(self, import_name, template_folder,
+ root_path=root_path)
+ self.name = name
+ self.url_prefix = url_prefix
+ self.subdomain = subdomain
+ self.static_folder = static_folder
+ self.static_url_path = static_url_path
+ self.deferred_functions = []
+ if url_defaults is None:
+ url_defaults = {}
+ self.url_values_defaults = url_defaults
+
+ def record(self, func):
+ """Registers a function that is called when the blueprint is
+ registered on the application. This function is called with the
+ setup state, as returned by the :meth:`make_setup_state` method,
+ as its argument.
+ """
+ if self._got_registered_once and self.warn_on_modifications:
+ from warnings import warn
+ warn(Warning('The blueprint was already registered once '
+ 'but is getting modified now. These changes '
+ 'will not show up.'))
+ self.deferred_functions.append(func)
+
+ def record_once(self, func):
+ """Works like :meth:`record` but wraps the function in another
+ function that will ensure the function is only called once. If the
+ blueprint is registered a second time on the application, the
+ function passed is not called.
+ """
+ def wrapper(state):
+ if state.first_registration:
+ func(state)
+ return self.record(update_wrapper(wrapper, func))
+
+ def make_setup_state(self, app, options, first_registration=False):
+ """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
+ object that is later passed to the register callback functions.
+ Subclasses can override this to return a subclass of the setup state.
+ """
+ return BlueprintSetupState(self, app, options, first_registration)
+
+ def register(self, app, options, first_registration=False):
+ """Called by :meth:`Flask.register_blueprint` to register a blueprint
+ on the application. This can be overridden to customize the register
+ behavior. Keyword arguments from
+ :func:`~flask.Flask.register_blueprint` are directly forwarded to this
+ method in the `options` dictionary.
+ """
+ self._got_registered_once = True
+ state = self.make_setup_state(app, options, first_registration)
+ if self.has_static_folder:
+ state.add_url_rule(self.static_url_path + '/<path:filename>',
+ view_func=self.send_static_file,
+ endpoint='static')
+
+ for deferred in self.deferred_functions:
+ deferred(state)
+
+ def route(self, rule, **options):
+ """Like :meth:`Flask.route` but for a blueprint. The endpoint for the
+ :func:`url_for` function is prefixed with the name of the blueprint.
+ """
+ def decorator(f):
+ endpoint = options.pop("endpoint", f.__name__)
+ self.add_url_rule(rule, endpoint, f, **options)
+ return f
+ return decorator
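A minimal sketch of defining and registering a blueprint under a URL
prefix (the ``admin`` name is illustrative)::

    from flask import Blueprint, Flask

    admin = Blueprint('admin', __name__, url_prefix='/admin')

    @admin.route('/')
    def index():
        return 'admin index'

    app = Flask(__name__)
    app.register_blueprint(admin)
    # url_for('admin.index') now builds '/admin/'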
+
+ def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
+ """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
+ the :func:`url_for` function is prefixed with the name of the blueprint.
+ """
+ if endpoint:
+ assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
+ self.record(lambda s:
+ s.add_url_rule(rule, endpoint, view_func, **options))
+
+ def endpoint(self, endpoint):
+ """Like :meth:`Flask.endpoint` but for a blueprint. This does not
+ prefix the endpoint with the blueprint name; this has to be done
+ explicitly by the user of this method. If the endpoint is prefixed
+ with a `.` it will be registered to the current blueprint, otherwise
+ it's an application independent endpoint.
+ """
+ def decorator(f):
+ def register_endpoint(state):
+ state.app.view_functions[endpoint] = f
+ self.record_once(register_endpoint)
+ return f
+ return decorator
+
+ def app_template_filter(self, name=None):
+ """Register a custom template filter, available application wide. Like
+ :meth:`Flask.template_filter` but for a blueprint.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_app_template_filter(f, name=name)
+ return f
+ return decorator
+
+ def add_app_template_filter(self, f, name=None):
+ """Register a custom template filter, available application wide. Like
+ :meth:`Flask.add_template_filter` but for a blueprint. Works exactly
+ like the :meth:`app_template_filter` decorator.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+ def register_template(state):
+ state.app.jinja_env.filters[name or f.__name__] = f
+ self.record_once(register_template)
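A hedged sketch of registering a filter through a blueprint, where
``bp`` stands for any blueprint instance::

    @bp.app_template_filter('reverse')
    def reverse_filter(s):
        return s[::-1]

    # once the blueprint is registered, templates can use:
    #   {{ 'flask' | reverse }}  ->  'ksalf'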
+
+ def app_template_test(self, name=None):
+ """Register a custom template test, available application wide. Like
+ :meth:`Flask.template_test` but for a blueprint.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_app_template_test(f, name=name)
+ return f
+ return decorator
+
+ def add_app_template_test(self, f, name=None):
+ """Register a custom template test, available application wide. Like
+ :meth:`Flask.add_template_test` but for a blueprint. Works exactly
+ like the :meth:`app_template_test` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+ def register_template(state):
+ state.app.jinja_env.tests[name or f.__name__] = f
+ self.record_once(register_template)
+
+ def app_template_global(self, name=None):
+ """Register a custom template global, available application wide. Like
+ :meth:`Flask.template_global` but for a blueprint.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global, otherwise the
+ function name will be used.
+ """
+ def decorator(f):
+ self.add_app_template_global(f, name=name)
+ return f
+ return decorator
+
+ def add_app_template_global(self, f, name=None):
+ """Register a custom template global, available application wide. Like
+ :meth:`Flask.add_template_global` but for a blueprint. Works exactly
+ like the :meth:`app_template_global` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global, otherwise the
+ function name will be used.
+ """
+ def register_template(state):
+ state.app.jinja_env.globals[name or f.__name__] = f
+ self.record_once(register_template)
+
+ def before_request(self, f):
+ """Like :meth:`Flask.before_request` but for a blueprint. This function
+ is only executed before each request that is handled by a function of
+ that blueprint.
+ """
+ self.record_once(lambda s: s.app.before_request_funcs
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def before_app_request(self, f):
+ """Like :meth:`Flask.before_request`. Such a function is executed
+ before each request, even if outside of a blueprint.
+ """
+ self.record_once(lambda s: s.app.before_request_funcs
+ .setdefault(None, []).append(f))
+ return f
+
+ def before_app_first_request(self, f):
+ """Like :meth:`Flask.before_first_request`. Such a function is
+ executed before the first request to the application.
+ """
+ self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
+ return f
+
+ def after_request(self, f):
+ """Like :meth:`Flask.after_request` but for a blueprint. This function
+ is only executed after each request that is handled by a function of
+ that blueprint.
+ """
+ self.record_once(lambda s: s.app.after_request_funcs
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def after_app_request(self, f):
+ """Like :meth:`Flask.after_request` but for a blueprint. Such a function
+ is executed after each request, even if outside of the blueprint.
+ """
+ self.record_once(lambda s: s.app.after_request_funcs
+ .setdefault(None, []).append(f))
+ return f
+
+ def teardown_request(self, f):
+ """Like :meth:`Flask.teardown_request` but for a blueprint. This
+ function is only executed when tearing down requests handled by a
+ function of that blueprint. Teardown request functions are executed
+ when the request context is popped, even when no actual request was
+ performed.
+ """
+ self.record_once(lambda s: s.app.teardown_request_funcs
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def teardown_app_request(self, f):
+ """Like :meth:`Flask.teardown_request` but for a blueprint. Such a
+ function is executed when tearing down each request, even if outside of
+ the blueprint.
+ """
+ self.record_once(lambda s: s.app.teardown_request_funcs
+ .setdefault(None, []).append(f))
+ return f
+
+ def context_processor(self, f):
+ """Like :meth:`Flask.context_processor` but for a blueprint. This
+ function is only executed for requests handled by a blueprint.
+ """
+ self.record_once(lambda s: s.app.template_context_processors
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def app_context_processor(self, f):
+ """Like :meth:`Flask.context_processor` but for a blueprint. Such a
+ function is executed for each request, even if outside of the blueprint.
+ """
+ self.record_once(lambda s: s.app.template_context_processors
+ .setdefault(None, []).append(f))
+ return f
+
+ def app_errorhandler(self, code):
+ """Like :meth:`Flask.errorhandler` but for a blueprint. This
+ handler is used for all requests, even if outside of the blueprint.
+ """
+ def decorator(f):
+ self.record_once(lambda s: s.app.errorhandler(code)(f))
+ return f
+ return decorator
+
+ def url_value_preprocessor(self, f):
+ """Registers a function as URL value preprocessor for this
+ blueprint. It's called before the view functions are called and
+ can modify the url values provided.
+ """
+ self.record_once(lambda s: s.app.url_value_preprocessors
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def url_defaults(self, f):
+ """Callback function for URL defaults for this blueprint. It's called
+ with the endpoint and values and should update the values passed
+ in place.
+ """
+ self.record_once(lambda s: s.app.url_default_functions
+ .setdefault(self.name, []).append(f))
+ return f
+
+ def app_url_value_preprocessor(self, f):
+ """Same as :meth:`url_value_preprocessor` but application wide.
+ """
+ self.record_once(lambda s: s.app.url_value_preprocessors
+ .setdefault(None, []).append(f))
+ return f
+
+ def app_url_defaults(self, f):
+ """Same as :meth:`url_defaults` but application wide.
+ """
+ self.record_once(lambda s: s.app.url_default_functions
+ .setdefault(None, []).append(f))
+ return f
+
+ def errorhandler(self, code_or_exception):
+ """Registers an error handler that becomes active for this blueprint
+ only. Please be aware that routing does not happen locally to a
+ blueprint, so an error handler for 404 usually is not handled by
+ a blueprint unless it is raised inside a view function. Another
+ special case is the 500 internal server error, which is always
+ looked up from the application.
+
+ Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
+ of the :class:`~flask.Flask` object.
+ """
+ def decorator(f):
+ self.record_once(lambda s: s.app._register_error_handler(
+ self.name, code_or_exception, f))
+ return f
+ return decorator
+
+ def register_error_handler(self, code_or_exception, f):
+ """Non-decorator version of the :meth:`errorhandler` error attach
+ function, akin to the :meth:`~flask.Flask.register_error_handler`
+ application-wide function of the :class:`~flask.Flask` object but
+ for error handlers limited to this blueprint.
+
+ .. versionadded:: 0.11
+ """
+ self.record_once(lambda s: s.app._register_error_handler(
+ self.name, code_or_exception, f))
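An illustrative sketch of both registration styles, subject to the
routing caveat above (``admin`` is a placeholder blueprint)::

    @admin.errorhandler(ValueError)
    def handle_value_error(e):
        return 'bad value', 400

    def handle_key_error(e):
        return 'missing key', 400

    admin.register_error_handler(KeyError, handle_key_error)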
diff --git a/app/lib/flask/cli.py b/app/lib/flask/cli.py
new file mode 100644
index 0000000..074ee76
--- /dev/null
+++ b/app/lib/flask/cli.py
@@ -0,0 +1,517 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.cli
+ ~~~~~~~~~
+
+ A simple command line application to run flask apps.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import sys
+from threading import Lock, Thread
+from functools import update_wrapper
+
+import click
+
+from ._compat import iteritems, reraise
+from .helpers import get_debug_flag
+from . import __version__
+
+class NoAppException(click.UsageError):
+ """Raised if an application cannot be found or loaded."""
+
+
+def find_best_app(module):
+ """Given a module instance this tries to find the best possible
+ application in the module or raises an exception.
+ """
+ from . import Flask
+
+ # Search for the most common names first.
+ for attr_name in 'app', 'application':
+ app = getattr(module, attr_name, None)
+ if app is not None and isinstance(app, Flask):
+ return app
+
+ # Otherwise find the only object that is a Flask instance.
+ matches = [v for k, v in iteritems(module.__dict__)
+ if isinstance(v, Flask)]
+
+ if len(matches) == 1:
+ return matches[0]
+ raise NoAppException('Failed to find application in module "%s". Are '
+ 'you sure it contains a Flask application? Maybe '
+ 'you wrapped it in a WSGI middleware or you are '
+ 'using a factory function.' % module.__name__)
+
+
+def prepare_exec_for_file(filename):
+ """Given a filename this will try to calculate the python path, add it
+ to the search path and return the actual module name that is expected.
+ """
+ module = []
+
+ # Chop off file extensions or package markers
+ if os.path.split(filename)[1] == '__init__.py':
+ filename = os.path.dirname(filename)
+ elif filename.endswith('.py'):
+ filename = filename[:-3]
+ else:
+ raise NoAppException('The file provided (%s) does exist but is not a '
+ 'valid Python file. This means that it cannot '
+ 'be used as an application. Please change the '
+ 'extension to .py' % filename)
+ filename = os.path.realpath(filename)
+
+ dirpath = filename
+ while 1:
+ dirpath, extra = os.path.split(dirpath)
+ module.append(extra)
+ if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
+ break
+
+ sys.path.insert(0, dirpath)
+ return '.'.join(module[::-1])
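For example, given a hypothetical package ``/srv/proj/pkg`` that
contains ``__init__.py`` and ``web.py``, the function behaves roughly
like this::

    prepare_exec_for_file('/srv/proj/pkg/web.py')
    # inserts '/srv/proj' at the front of sys.path
    # and returns the module name 'pkg.web'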
+
+
+def locate_app(app_id):
+ """Attempts to locate the application."""
+ __traceback_hide__ = True
+ if ':' in app_id:
+ module, app_obj = app_id.split(':', 1)
+ else:
+ module = app_id
+ app_obj = None
+
+ try:
+ __import__(module)
+ except ImportError:
+ # Reraise the ImportError if it occurred within the imported module.
+ # Determine this by checking whether the trace has a depth > 1.
+ if sys.exc_info()[-1].tb_next:
+ raise
+ else:
+ raise NoAppException('The file/path provided (%s) does not appear'
+ ' to exist. Please verify the path is '
+ 'correct. If app is not on PYTHONPATH, '
+ 'ensure the extension is .py' % module)
+
+ mod = sys.modules[module]
+ if app_obj is None:
+ app = find_best_app(mod)
+ else:
+ app = getattr(mod, app_obj, None)
+ if app is None:
+ raise RuntimeError('Failed to find application in module "%s"'
+ % module)
+
+ return app
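The ``app_id`` may be a bare module name or a ``module:attribute``
pair; both calls below are illustrative::

    locate_app('hello')        # import hello.py, auto-detect the Flask app
    locate_app('hello:app')    # import hello.py, use its ``app`` attribute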
+
+
+def find_default_import_path():
+ app = os.environ.get('FLASK_APP')
+ if app is None:
+ return
+ if os.path.isfile(app):
+ return prepare_exec_for_file(app)
+ return app
+
+
+def get_version(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+ message = 'Flask %(version)s\nPython %(python_version)s'
+ click.echo(message % {
+ 'version': __version__,
+ 'python_version': sys.version,
+ }, color=ctx.color)
+ ctx.exit()
+
+version_option = click.Option(['--version'],
+ help='Show the flask version',
+ expose_value=False,
+ callback=get_version,
+ is_flag=True, is_eager=True)
+
+class DispatchingApp(object):
+ """Special application that dispatches to a Flask application which
+ is imported by name in a background thread. If an error happens
+ it is recorded and re-raised as part of the WSGI handling, which in
+ the case of the Werkzeug debugger means that it shows up in the browser.
+ """
+
+ def __init__(self, loader, use_eager_loading=False):
+ self.loader = loader
+ self._app = None
+ self._lock = Lock()
+ self._bg_loading_exc_info = None
+ if use_eager_loading:
+ self._load_unlocked()
+ else:
+ self._load_in_background()
+
+ def _load_in_background(self):
+ def _load_app():
+ __traceback_hide__ = True
+ with self._lock:
+ try:
+ self._load_unlocked()
+ except Exception:
+ self._bg_loading_exc_info = sys.exc_info()
+ t = Thread(target=_load_app, args=())
+ t.start()
+
+ def _flush_bg_loading_exception(self):
+ __traceback_hide__ = True
+ exc_info = self._bg_loading_exc_info
+ if exc_info is not None:
+ self._bg_loading_exc_info = None
+ reraise(*exc_info)
+
+ def _load_unlocked(self):
+ __traceback_hide__ = True
+ self._app = rv = self.loader()
+ self._bg_loading_exc_info = None
+ return rv
+
+ def __call__(self, environ, start_response):
+ __traceback_hide__ = True
+ if self._app is not None:
+ return self._app(environ, start_response)
+ self._flush_bg_loading_exception()
+ with self._lock:
+ if self._app is not None:
+ rv = self._app
+ else:
+ rv = self._load_unlocked()
+ return rv(environ, start_response)
+
+
+class ScriptInfo(object):
+ """Help object to deal with Flask applications. This is usually not
+ necessary to interface with as it's used internally in the dispatching
+ to click. In future versions of Flask this object will most likely play
+ a bigger role. Typically it's created automatically by the
+ :class:`FlaskGroup` but you can also manually create it and pass it
+ onwards as click object.
+ """
+
+ def __init__(self, app_import_path=None, create_app=None):
+ if create_app is None:
+ if app_import_path is None:
+ app_import_path = find_default_import_path()
+ self.app_import_path = app_import_path
+ else:
+ app_import_path = None
+
+ #: Optionally the import path for the Flask application.
+ self.app_import_path = app_import_path
+ #: Optionally a function that is passed the script info to create
+ #: the instance of the application.
+ self.create_app = create_app
+ #: A dictionary with arbitrary data that can be associated with
+ #: this script info.
+ self.data = {}
+ self._loaded_app = None
+
+ def load_app(self):
+ """Loads the Flask app (if not yet loaded) and returns it. Calling
+ this multiple times will just result in the already loaded app
+ being returned.
+ """
+ __traceback_hide__ = True
+ if self._loaded_app is not None:
+ return self._loaded_app
+ if self.create_app is not None:
+ rv = self.create_app(self)
+ else:
+ if not self.app_import_path:
+ raise NoAppException(
+ 'Could not locate Flask application. You did not provide '
+ 'the FLASK_APP environment variable.\n\nFor more '
+ 'information see '
+ 'http://flask.pocoo.org/docs/latest/quickstart/')
+ rv = locate_app(self.app_import_path)
+ debug = get_debug_flag()
+ if debug is not None:
+ rv.debug = debug
+ self._loaded_app = rv
+ return rv
+
+
+pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
+
+
+def with_appcontext(f):
+ """Wraps a callback so that it's guaranteed to be executed with the
+ script's application context. If callbacks are registered directly
+ to the ``app.cli`` object then they are wrapped with this function
+ by default unless it's disabled.
+ """
+ @click.pass_context
+ def decorator(__ctx, *args, **kwargs):
+ with __ctx.ensure_object(ScriptInfo).load_app().app_context():
+ return __ctx.invoke(f, *args, **kwargs)
+ return update_wrapper(decorator, f)
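A minimal sketch of a click command that needs the application
context (the command name is made up)::

    import click
    from flask import current_app
    from flask.cli import with_appcontext

    @click.command()
    @with_appcontext
    def show_name():
        # current_app is bound here because of with_appcontext
        click.echo(current_app.name)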
+
+
+class AppGroup(click.Group):
+ """This works similar to a regular click :class:`~click.Group` but it
+ changes the behavior of the :meth:`command` decorator so that it
+ automatically wraps the functions in :func:`with_appcontext`.
+
+ Not to be confused with :class:`FlaskGroup`.
+ """
+
+ def command(self, *args, **kwargs):
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
+ unless it's disabled by passing ``with_appcontext=False``.
+ """
+ wrap_for_ctx = kwargs.pop('with_appcontext', True)
+ def decorator(f):
+ if wrap_for_ctx:
+ f = with_appcontext(f)
+ return click.Group.command(self, *args, **kwargs)(f)
+ return decorator
+
+ def group(self, *args, **kwargs):
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it defaults the group class to
+ :class:`AppGroup`.
+ """
+ kwargs.setdefault('cls', AppGroup)
+ return click.Group.group(self, *args, **kwargs)
+
+
+class FlaskGroup(AppGroup):
+ """Special subclass of the :class:`AppGroup` group that supports
+ loading more commands from the configured Flask app. Normally a
+ developer does not have to interface with this class but there are
+ some very advanced use cases for which it makes sense to create an
+ instance of this.
+
+ For information on why this is useful, see :ref:`custom-scripts`.
+
+ :param add_default_commands: if this is True then the default run and
+ shell commands will be added.
+ :param add_version_option: adds the ``--version`` option.
+ :param create_app: an optional callback that is passed the script info
+ and returns the loaded app.
+ """
+
+ def __init__(self, add_default_commands=True, create_app=None,
+ add_version_option=True, **extra):
+ params = list(extra.pop('params', None) or ())
+
+ if add_version_option:
+ params.append(version_option)
+
+ AppGroup.__init__(self, params=params, **extra)
+ self.create_app = create_app
+
+ if add_default_commands:
+ self.add_command(run_command)
+ self.add_command(shell_command)
+
+ self._loaded_plugin_commands = False
+
+ def _load_plugin_commands(self):
+ if self._loaded_plugin_commands:
+ return
+ try:
+ import pkg_resources
+ except ImportError:
+ self._loaded_plugin_commands = True
+ return
+
+ for ep in pkg_resources.iter_entry_points('flask.commands'):
+ self.add_command(ep.load(), ep.name)
+ self._loaded_plugin_commands = True
+
+ def get_command(self, ctx, name):
+ self._load_plugin_commands()
+
+ # We load built-in commands first as these should always be the
+ # same no matter what the app does. If the app does want to
+ # override this it needs to make a custom instance of this group
+ # and not attach the default commands.
+ #
+ # This also means that the script stays functional in case the
+ # application completely fails.
+ rv = AppGroup.get_command(self, ctx, name)
+ if rv is not None:
+ return rv
+
+ info = ctx.ensure_object(ScriptInfo)
+ try:
+ rv = info.load_app().cli.get_command(ctx, name)
+ if rv is not None:
+ return rv
+ except NoAppException:
+ pass
+
+ def list_commands(self, ctx):
+ self._load_plugin_commands()
+
+ # The commands available are the built-in commands plus the ones
+ # provided by the application (if it is available).
+ rv = set(click.Group.list_commands(self, ctx))
+ info = ctx.ensure_object(ScriptInfo)
+ try:
+ rv.update(info.load_app().cli.list_commands(ctx))
+ except Exception:
+ # Here we intentionally swallow all exceptions as we don't
+ # want the help page to break if the app does not exist.
+ # If someone attempts to use the command we try to create
+ # the app again and this will give us the error.
+ pass
+ return sorted(rv)
+
+ def main(self, *args, **kwargs):
+ obj = kwargs.get('obj')
+ if obj is None:
+ obj = ScriptInfo(create_app=self.create_app)
+ kwargs['obj'] = obj
+ kwargs.setdefault('auto_envvar_prefix', 'FLASK')
+ return AppGroup.main(self, *args, **kwargs)
+
+
+@click.command('run', short_help='Runs a development server.')
+@click.option('--host', '-h', default='127.0.0.1',
+ help='The interface to bind to.')
+@click.option('--port', '-p', default=5000,
+ help='The port to bind to.')
+@click.option('--reload/--no-reload', default=None,
+ help='Enable or disable the reloader. By default the reloader '
+ 'is active if debug is enabled.')
+@click.option('--debugger/--no-debugger', default=None,
+ help='Enable or disable the debugger. By default the debugger '
+ 'is active if debug is enabled.')
+@click.option('--eager-loading/--lazy-loader', default=None,
+ help='Enable or disable eager loading. By default eager '
+ 'loading is enabled if the reloader is disabled.')
+@click.option('--with-threads/--without-threads', default=False,
+ help='Enable or disable multithreading.')
+@pass_script_info
+def run_command(info, host, port, reload, debugger, eager_loading,
+ with_threads):
+ """Runs a local development server for the Flask application.
+
+ This local server is recommended for development purposes only but it
+ can also be used for simple intranet deployments. By default it will
+ not support any sort of concurrency at all to simplify debugging. This
+ can be changed with the --with-threads option which will enable basic
+ multithreading.
+
+ The reloader and debugger are by default enabled if the debug flag of
+ Flask is enabled and disabled otherwise.
+ """
+ from werkzeug.serving import run_simple
+
+ debug = get_debug_flag()
+ if reload is None:
+ reload = bool(debug)
+ if debugger is None:
+ debugger = bool(debug)
+ if eager_loading is None:
+ eager_loading = not reload
+
+ app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
+
+ # Extra startup messages. This depends a bit on Werkzeug internals to
+ # not double execute when the reloader kicks in.
+ if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
+ # If we have an import path we can print it out now which can help
+ # people understand what's being served. If we do not have an
+ # import path because the app was loaded through a callback then
+ # we won't print anything.
+ if info.app_import_path is not None:
+ print(' * Serving Flask app "%s"' % info.app_import_path)
+ if debug is not None:
+ print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
+
+ run_simple(host, port, app, use_reloader=reload,
+ use_debugger=debugger, threaded=with_threads)
+
+
+@click.command('shell', short_help='Runs a shell in the app context.')
+@with_appcontext
+def shell_command():
+ """Runs an interactive Python shell in the context of a given
+ Flask application. The application will populate the default
+ namespace of this shell according to its configuration.
+
+ This is useful for executing small snippets of management code
+ without having to manually configure the application.
+ """
+ import code
+ from flask.globals import _app_ctx_stack
+ app = _app_ctx_stack.top.app
+ banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
+ sys.version,
+ sys.platform,
+ app.import_name,
+ app.debug and ' [debug]' or '',
+ app.instance_path,
+ )
+ ctx = {}
+
+ # Support the regular Python interpreter startup script if someone
+ # is using it.
+ startup = os.environ.get('PYTHONSTARTUP')
+ if startup and os.path.isfile(startup):
+ with open(startup, 'r') as f:
+ eval(compile(f.read(), startup, 'exec'), ctx)
+
+ ctx.update(app.make_shell_context())
+
+ code.interact(banner=banner, local=ctx)
+
+
+cli = FlaskGroup(help="""\
+This shell command acts as a general utility script for Flask applications.
+
+It loads the application configured (through the FLASK_APP environment
+variable) and then provides commands, either ones supplied by the
+application or by Flask itself.
+
+The most useful commands are the "run" and "shell" commands.
+
+Example usage:
+
+\b
+ %(prefix)s%(cmd)s FLASK_APP=hello.py
+ %(prefix)s%(cmd)s FLASK_DEBUG=1
+ %(prefix)sflask run
+""" % {
+ 'cmd': os.name == 'posix' and 'export' or 'set',
+ 'prefix': os.name == 'posix' and '$ ' or '',
+})
+
+
+def main(as_module=False):
+ this_module = __package__ + '.cli'
+ args = sys.argv[1:]
+
+ if as_module:
+ if sys.version_info >= (2, 7):
+ name = 'python -m ' + this_module.rsplit('.', 1)[0]
+ else:
+ name = 'python -m ' + this_module
+
+ # This module is always executed as "python -m flask.run" and as such
+ # we need to ensure that we restore the actual command line so that
+ # the reloader can properly operate.
+ sys.argv = ['-m', this_module] + sys.argv[1:]
+ else:
+ name = None
+
+ cli.main(args=args, prog_name=name)
+
+
+if __name__ == '__main__':
+ main(as_module=True)
diff --git a/app/lib/flask/config.py b/app/lib/flask/config.py
new file mode 100644
index 0000000..697add7
--- /dev/null
+++ b/app/lib/flask/config.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.config
+ ~~~~~~~~~~~~
+
+ Implements the configuration related objects.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import types
+import errno
+
+from werkzeug.utils import import_string
+from ._compat import string_types, iteritems
+from . import json
+
+
+class ConfigAttribute(object):
+ """Makes an attribute forward to the config"""
+
+ def __init__(self, name, get_converter=None):
+ self.__name__ = name
+ self.get_converter = get_converter
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ return self
+ rv = obj.config[self.__name__]
+ if self.get_converter is not None:
+ rv = self.get_converter(rv)
+ return rv
+
+ def __set__(self, obj, value):
+ obj.config[self.__name__] = value
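This descriptor is the mechanism behind attributes such as
``Flask.debug``; a minimal sketch using the same pattern::

    class App(object):
        debug = ConfigAttribute('DEBUG')

        def __init__(self):
            self.config = {'DEBUG': False}

    a = App()
    a.debug = True
    assert a.config['DEBUG'] is True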
+
+
+class Config(dict):
+ """Works exactly like a dict but provides ways to fill it from files
+ or special dictionaries. There are two common patterns to populate the
+ config.
+
+ Either you can fill the config from a config file::
+
+ app.config.from_pyfile('yourconfig.cfg')
+
+ Or alternatively you can define the configuration options in the
+ module that calls :meth:`from_object` or provide an import path to
+ a module that should be loaded. It is also possible to tell it to
+ use the same module and with that provide the configuration values
+ just before the call::
+
+ DEBUG = True
+ SECRET_KEY = 'development key'
+ app.config.from_object(__name__)
+
+ In both cases (loading from any Python file or loading from modules),
+ only uppercase keys are added to the config. This makes it possible to use
+ lowercase values in the config file for temporary values that are not added
+ to the config or to define the config keys in the same file that implements
+ the application.
+
+ Probably the most interesting way to load configurations is from an
+ environment variable pointing to a file::
+
+ app.config.from_envvar('YOURAPPLICATION_SETTINGS')
+
+ In this case before launching the application you have to set this
+ environment variable to the file you want to use. On Linux and OS X
+ use the export statement::
+
+ export YOURAPPLICATION_SETTINGS='/path/to/config/file'
+
+ On Windows use ``set`` instead.
+
+ :param root_path: path to which files are read relative from. When the
+ config object is created by the application, this is
+ the application's :attr:`~flask.Flask.root_path`.
+ :param defaults: an optional dictionary of default values
+ """
+
+ def __init__(self, root_path, defaults=None):
+ dict.__init__(self, defaults or {})
+ self.root_path = root_path
+
+ def from_envvar(self, variable_name, silent=False):
+ """Loads a configuration from an environment variable pointing to
+ a configuration file. This is basically just a shortcut with nicer
+ error messages for this line of code::
+
+ app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
+
+ :param variable_name: name of the environment variable
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+ :return: bool. ``True`` if able to load config, ``False`` otherwise.
+ """
+ rv = os.environ.get(variable_name)
+ if not rv:
+ if silent:
+ return False
+ raise RuntimeError('The environment variable %r is not set '
+ 'and as such configuration could not be '
+ 'loaded. Set this variable and make it '
+ 'point to a configuration file' %
+ variable_name)
+ return self.from_pyfile(rv, silent=silent)
+
+ def from_pyfile(self, filename, silent=False):
+ """Updates the values in the config from a Python file. This function
+ behaves as if the file was imported as a module with the
+ :meth:`from_object` function.
+
+ :param filename: the filename of the config. This can either be an
+ absolute filename or a filename relative to the
+ root path.
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+
+ .. versionadded:: 0.7
+ `silent` parameter.
+ """
+ filename = os.path.join(self.root_path, filename)
+ d = types.ModuleType('config')
+ d.__file__ = filename
+ try:
+ with open(filename, mode='rb') as config_file:
+ exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
+ except IOError as e:
+ if silent and e.errno in (errno.ENOENT, errno.EISDIR):
+ return False
+ e.strerror = 'Unable to load configuration file (%s)' % e.strerror
+ raise
+ self.from_object(d)
+ return True
+
+ def from_object(self, obj):
+ """Updates the values from the given object. An object can be of one
+ of the following two types:
+
+ - a string: in this case the object with that name will be imported
+ - an actual object reference: that object is used directly
+
+ Objects are usually either modules or classes. :meth:`from_object`
+ loads only the uppercase attributes of the module/class. A ``dict``
+ object will not work with :meth:`from_object` because the keys of a
+ ``dict`` are not attributes of the ``dict`` class.
+
+ Example of module-based configuration::
+
+ app.config.from_object('yourapplication.default_config')
+ from yourapplication import default_config
+ app.config.from_object(default_config)
+
+ You should not use this function to load the actual configuration but
+ rather configuration defaults. The actual config should be loaded
+ with :meth:`from_pyfile` and ideally from a location not within the
+ package because the package might be installed system wide.
+
+ See :ref:`config-dev-prod` for an example of class-based configuration
+ using :meth:`from_object`.
+
+ :param obj: an import name or object
+ """
+ if isinstance(obj, string_types):
+ obj = import_string(obj)
+ for key in dir(obj):
+ if key.isupper():
+ self[key] = getattr(obj, key)
+
+ def from_json(self, filename, silent=False):
+ """Updates the values in the config from a JSON file. This function
+ behaves as if the JSON object was a dictionary and passed to the
+ :meth:`from_mapping` function.
+
+ :param filename: the filename of the JSON file. This can either be an
+ absolute filename or a filename relative to the
+ root path.
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+
+ .. versionadded:: 0.11
+ """
+ filename = os.path.join(self.root_path, filename)
+
+ try:
+ with open(filename) as json_file:
+ obj = json.loads(json_file.read())
+ except IOError as e:
+ if silent and e.errno in (errno.ENOENT, errno.EISDIR):
+ return False
+ e.strerror = 'Unable to load configuration file (%s)' % e.strerror
+ raise
+ return self.from_mapping(obj)
+
+ def from_mapping(self, *mapping, **kwargs):
+ """Updates the config like :meth:`update` ignoring items with non-upper
+ keys.
+
+ .. versionadded:: 0.11
+ """
+ mappings = []
+ if len(mapping) == 1:
+ if hasattr(mapping[0], 'items'):
+ mappings.append(mapping[0].items())
+ else:
+ mappings.append(mapping[0])
+ elif len(mapping) > 1:
+ raise TypeError(
+ 'expected at most 1 positional argument, got %d' % len(mapping)
+ )
+ mappings.append(kwargs.items())
+ for mapping in mappings:
+ for (key, value) in mapping:
+ if key.isupper():
+ self[key] = value
+ return True
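An illustrative call; note that only uppercase keys survive::

    app.config.from_mapping({'DEBUG': True, 'secret': 'dropped'},
                            SECRET_KEY='dev')
    # app.config now has DEBUG=True and SECRET_KEY='dev';
    # the lowercase 'secret' key was ignored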
+
+ def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
+ """Returns a dictionary containing a subset of configuration options
+ that match the specified namespace/prefix. Example usage::
+
+ app.config['IMAGE_STORE_TYPE'] = 'fs'
+ app.config['IMAGE_STORE_PATH'] = '/var/app/images'
+ app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
+ image_store_config = app.config.get_namespace('IMAGE_STORE_')
+
+ The resulting dictionary `image_store_config` would look like::
+
+ {
+ 'type': 'fs',
+ 'path': '/var/app/images',
+ 'base_url': 'http://img.website.com'
+ }
+
+ This is often useful when configuration options map directly to
+ keyword arguments in functions or class constructors.
+
+ :param namespace: a configuration namespace
+ :param lowercase: a flag indicating if the keys of the resulting
+ dictionary should be lowercase
+ :param trim_namespace: a flag indicating if the keys of the resulting
+ dictionary should not include the namespace
+
+ .. versionadded:: 0.11
+ """
+ rv = {}
+ for k, v in iteritems(self):
+ if not k.startswith(namespace):
+ continue
+ if trim_namespace:
+ key = k[len(namespace):]
+ else:
+ key = k
+ if lowercase:
+ key = key.lower()
+ rv[key] = v
+ return rv
+
+ def __repr__(self):
+ return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
diff --git a/src/lib/flask/ctx.py b/app/lib/flask/ctx.py
similarity index 81%
rename from src/lib/flask/ctx.py
rename to app/lib/flask/ctx.py
index f134237..480d9c5 100644
--- a/src/lib/flask/ctx.py
+++ b/app/lib/flask/ctx.py
@@ -5,20 +5,22 @@
Implements the objects required to keep the context.
- :copyright: (c) 2011 by Armin Ronacher.
+ :copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
-from __future__ import with_statement
-
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
-from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
+from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
+
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
class _AppCtxGlobals(object):
@@ -27,6 +29,15 @@ class _AppCtxGlobals(object):
def get(self, name, default=None):
return self.__dict__.get(name, default)
+ def pop(self, name, default=_sentinel):
+ if default is _sentinel:
+ return self.__dict__.pop(name)
+ else:
+ return self.__dict__.pop(name, default)
+
+ def setdefault(self, name, default=None):
+ return self.__dict__.setdefault(name, default)
+
def __contains__(self, item):
return item in self.__dict__
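The new helpers give ``g`` a dict-like API; a short illustrative
sketch (assuming an active application context)::

    from flask import g

    g.setdefault('items', [])    # create the attribute on first use
    g.items.append('x')
    g.pop('items', None)         # remove it without raising if absent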
@@ -163,17 +174,21 @@ def __init__(self, app):
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
+ if hasattr(sys, 'exc_clear'):
+ sys.exc_clear()
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
- def pop(self, exc=None):
+ def pop(self, exc=_sentinel):
"""Pops the app context."""
- self._refcnt -= 1
- if self._refcnt <= 0:
- if exc is None:
- exc = sys.exc_info()[1]
- self.app.do_teardown_appcontext(exc)
- rv = _app_ctx_stack.pop()
+ try:
+ self._refcnt -= 1
+ if self._refcnt <= 0:
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ self.app.do_teardown_appcontext(exc)
+ finally:
+ rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
@@ -185,6 +200,9 @@ def __enter__(self):
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
+ if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
+ reraise(exc_type, exc_value, tb)
+
class RequestContext(object):
"""The request context contains all request relevant information. It is
@@ -204,8 +222,8 @@ class RequestContext(object):
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
- that did not fail and outside of `DEBUG` mode. By setting
- ``'flask._preserve_context'`` to `True` on the WSGI environment the
+ that did not fail and outside of ``DEBUG`` mode. By setting
+ ``'flask._preserve_context'`` to ``True`` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
@@ -246,16 +264,6 @@ def __init__(self, app, environ, request=None):
self.match_request()
- # XXX: Support for deprecated functionality. This is going away with
- # Flask 1.0
- blueprint = self.request.blueprint
- if blueprint is not None:
- # better safe than sorry, we don't want to break code that
- # already worked
- bp = app.blueprints.get(blueprint)
- if bp is not None and blueprint_is_module(bp):
- self.request._is_old_module = True
-
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
@@ -296,7 +304,7 @@ def push(self):
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
# it's invalidated, otherwise we run at risk that something leaks
- # memory. This is usually only a problem in testsuite since this
+ # memory. This is usually only a problem in the test suite since this
# functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
@@ -312,6 +320,9 @@ def push(self):
else:
self._implicit_app_ctx_stack.append(None)
+ if hasattr(sys, 'exc_clear'):
+ sys.exc_clear()
+
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
@@ -322,7 +333,7 @@ def push(self):
if self.session is None:
self.session = self.app.make_null_session()
- def pop(self, exc=None):
+ def pop(self, exc=_sentinel):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
@@ -332,38 +343,40 @@ def pop(self, exc=None):
"""
app_ctx = self._implicit_app_ctx_stack.pop()
- clear_request = False
- if not self._implicit_app_ctx_stack:
- self.preserved = False
- self._preserved_exc = None
- if exc is None:
- exc = sys.exc_info()[1]
- self.app.do_teardown_request(exc)
-
- # If this interpreter supports clearing the exception information
- # we do that now. This will only go into effect on Python 2.x,
- # on 3.x it disappears automatically at the end of the exception
- # stack.
- if hasattr(sys, 'exc_clear'):
- sys.exc_clear()
-
- request_close = getattr(self.request, 'close', None)
- if request_close is not None:
- request_close()
- clear_request = True
-
- rv = _request_ctx_stack.pop()
- assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
- % (rv, self)
-
- # get rid of circular dependencies at the end of the request
- # so that we don't require the GC to be active.
- if clear_request:
- rv.request.environ['werkzeug.request'] = None
-
- # Get rid of the app as well if necessary.
- if app_ctx is not None:
- app_ctx.pop(exc)
+ try:
+ clear_request = False
+ if not self._implicit_app_ctx_stack:
+ self.preserved = False
+ self._preserved_exc = None
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ self.app.do_teardown_request(exc)
+
+ # If this interpreter supports clearing the exception information
+ # we do that now. This will only go into effect on Python 2.x,
+ # on 3.x it disappears automatically at the end of the exception
+ # stack.
+ if hasattr(sys, 'exc_clear'):
+ sys.exc_clear()
+
+ request_close = getattr(self.request, 'close', None)
+ if request_close is not None:
+ request_close()
+ clear_request = True
+ finally:
+ rv = _request_ctx_stack.pop()
+
+ # get rid of circular dependencies at the end of the request
+ # so that we don't require the GC to be active.
+ if clear_request:
+ rv.request.environ['werkzeug.request'] = None
+
+ # Get rid of the app as well if necessary.
+ if app_ctx is not None:
+ app_ctx.pop(exc)
+
+ assert rv is self, 'Popped wrong request context. ' \
+ '(%r instead of %r)' % (rv, self)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
@@ -385,6 +398,9 @@ def __exit__(self, exc_type, exc_value, tb):
# See flask.testing for how this works.
self.auto_pop(exc_value)
+ if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
+ reraise(exc_type, exc_value, tb)
+
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
diff --git a/app/lib/flask/debughelpers.py b/app/lib/flask/debughelpers.py
new file mode 100644
index 0000000..90710dd
--- /dev/null
+++ b/app/lib/flask/debughelpers.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.debughelpers
+ ~~~~~~~~~~~~~~~~~~
+
+ Various helpers to make the development experience better.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+from ._compat import implements_to_string, text_type
+from .app import Flask
+from .blueprints import Blueprint
+from .globals import _request_ctx_stack
+
+
+class UnexpectedUnicodeError(AssertionError, UnicodeError):
+ """Raised in places where we want some better error reporting for
+ unexpected unicode or binary data.
+ """
+
+
+@implements_to_string
+class DebugFilesKeyError(KeyError, AssertionError):
+ """Raised from request.files during debugging. The idea is that it can
+ provide a better error message than just a generic KeyError/BadRequest.
+ """
+
+ def __init__(self, request, key):
+ form_matches = request.form.getlist(key)
+ buf = ['You tried to access the file "%s" in the request.files '
+ 'dictionary but it does not exist. The mimetype for the request '
+ 'is "%s" instead of "multipart/form-data" which means that no '
+ 'file contents were transmitted. To fix this error you should '
+ 'provide enctype="multipart/form-data" in your form.' %
+ (key, request.mimetype)]
+ if form_matches:
+ buf.append('\n\nThe browser instead transmitted some file names. '
+ 'This was submitted: %s' % ', '.join('"%s"' % x
+ for x in form_matches))
+ self.msg = ''.join(buf)
+
+ def __str__(self):
+ return self.msg
+
+
+class FormDataRoutingRedirect(AssertionError):
+ """This exception is raised by Flask in debug mode if it detects a
+ redirect caused by the routing system when the request method is not
+ GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
+ """
+
+ def __init__(self, request):
+ exc = request.routing_exception
+ buf = ['A request was sent to this URL (%s) but a redirect was '
+ 'issued automatically by the routing system to "%s".'
+ % (request.url, exc.new_url)]
+
+ # In case just a slash was appended we can be extra helpful
+ if request.base_url + '/' == exc.new_url.split('?')[0]:
+ buf.append(' The URL was defined with a trailing slash so '
+ 'Flask will automatically redirect to the URL '
+ 'with the trailing slash if it was accessed '
+ 'without one.')
+
+ buf.append(' Make sure to directly send your %s-request to this URL '
+ 'since we can\'t make browsers or HTTP clients redirect '
+ 'with form data reliably or without user interaction.' %
+ request.method)
+ buf.append('\n\nNote: this exception is only raised in debug mode')
+ AssertionError.__init__(self, ''.join(buf).encode('utf-8'))
+
+
+def attach_enctype_error_multidict(request):
+ """Since Flask 0.8 we're monkeypatching the files object in case a
+ request is detected that does not use multipart form data but the files
+ object is accessed.
+ """
+ oldcls = request.files.__class__
+ class newcls(oldcls):
+ def __getitem__(self, key):
+ try:
+ return oldcls.__getitem__(self, key)
+ except KeyError:
+ if key not in request.form:
+ raise
+ raise DebugFilesKeyError(request, key)
+ newcls.__name__ = oldcls.__name__
+ newcls.__module__ = oldcls.__module__
+ request.files.__class__ = newcls
+
+
+def _dump_loader_info(loader):
+ yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__)
+ for key, value in sorted(loader.__dict__.items()):
+ if key.startswith('_'):
+ continue
+ if isinstance(value, (tuple, list)):
+ if not all(isinstance(x, (str, text_type)) for x in value):
+ continue
+ yield '%s:' % key
+ for item in value:
+ yield ' - %s' % item
+ continue
+ elif not isinstance(value, (str, text_type, int, float, bool)):
+ continue
+ yield '%s: %r' % (key, value)
+
+
+def explain_template_loading_attempts(app, template, attempts):
+ """This should help developers understand what failed"""
+ info = ['Locating template "%s":' % template]
+ total_found = 0
+ blueprint = None
+ reqctx = _request_ctx_stack.top
+ if reqctx is not None and reqctx.request.blueprint is not None:
+ blueprint = reqctx.request.blueprint
+
+ for idx, (loader, srcobj, triple) in enumerate(attempts):
+ if isinstance(srcobj, Flask):
+ src_info = 'application "%s"' % srcobj.import_name
+ elif isinstance(srcobj, Blueprint):
+ src_info = 'blueprint "%s" (%s)' % (srcobj.name,
+ srcobj.import_name)
+ else:
+ src_info = repr(srcobj)
+
+ info.append('% 5d: trying loader of %s' % (
+ idx + 1, src_info))
+
+ for line in _dump_loader_info(loader):
+ info.append(' %s' % line)
+
+ if triple is None:
+ detail = 'no match'
+ else:
+ detail = 'found (%r)' % (triple[1] or '')
+ total_found += 1
+ info.append(' -> %s' % detail)
+
+ seems_fishy = False
+ if total_found == 0:
+ info.append('Error: the template could not be found.')
+ seems_fishy = True
+ elif total_found > 1:
+ info.append('Warning: multiple loaders returned a match for the template.')
+ seems_fishy = True
+
+ if blueprint is not None and seems_fishy:
+ info.append(' The template was looked up from an endpoint that '
+ 'belongs to the blueprint "%s".' % blueprint)
+ info.append(' Maybe you did not place a template in the right folder?')
+ info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')
+
+ app.logger.info('\n'.join(info))
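+
+# Illustrative only (not part of the upstream module): for a lookup that
+# misses a single FileSystemLoader, the ``info`` lines assembled above are
+# logged roughly as:
+#
+#   Locating template "index.html":
+#       1: trying loader of application "demo"
+#          class: jinja2.loaders.FileSystemLoader
+#          searchpath:
+#            - /srv/demo/templates
+#          -> no match
+#   Error: the template could not be found.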
diff --git a/app/lib/flask/ext/__init__.py b/app/lib/flask/ext/__init__.py
new file mode 100644
index 0000000..051f44a
--- /dev/null
+++ b/app/lib/flask/ext/__init__.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.ext
+ ~~~~~~~~~
+
+ Redirect imports for extensions. This module basically makes it possible
+ for us to transition from flaskext.foo to flask_foo without having to
+ force all extensions to upgrade at the same time.
+
+ When a user does ``from flask.ext.foo import bar`` it will attempt to
+ import ``from flask_foo import bar`` first and when that fails it will
+ try to import ``from flaskext.foo import bar``.
+
+ We're switching from namespace packages because it was just too painful for
+ everybody involved.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+def setup():
+ from ..exthook import ExtensionImporter
+ importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__)
+ importer.install()
+
+
+setup()
+del setup
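+
+# Illustrative sketch (not part of the upstream file): assuming a package
+# named ``flask_example`` is importable, the redirect installed above makes
+# both spellings resolve to the same module and warns about the old one.
+def _demo_redirect():  # example only; never called on import
+    import warnings
+    with warnings.catch_warnings(record=True) as caught:
+        warnings.simplefilter('always')
+        from flask.ext import example as old  # hypothetical extension
+    import flask_example as new
+    assert old is new
+    assert any('deprecated' in str(w.message) for w in caught)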
diff --git a/src/lib/flask/exthook.py b/app/lib/flask/exthook.py
similarity index 85%
rename from src/lib/flask/exthook.py
rename to app/lib/flask/exthook.py
index d0d814c..d884280 100644
--- a/src/lib/flask/exthook.py
+++ b/app/lib/flask/exthook.py
@@ -16,14 +16,21 @@
This is used by `flask.ext`.
- :copyright: (c) 2011 by Armin Ronacher.
+ :copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
+import warnings
from ._compat import reraise
+
+
+class ExtDeprecationWarning(DeprecationWarning):
+ pass
+
+warnings.simplefilter('always', ExtDeprecationWarning)
+
+
class ExtensionImporter(object):
"""This importer redirects imports from this submodule to other locations.
This makes it possible to transition from the old flaskext.name to the
@@ -49,13 +56,21 @@ def install(self):
sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
def find_module(self, fullname, path=None):
- if fullname.startswith(self.prefix):
+ if fullname.startswith(self.prefix) and \
+ fullname != 'flask.ext.ExtDeprecationWarning':
return self
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
+
modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
+
+ warnings.warn(
+ "Importing flask.ext.{x} is deprecated, use flask_{x} instead."
+ .format(x=modname), ExtDeprecationWarning, stacklevel=2
+ )
+
for path in self.module_choices:
realname = path % modname
try:
@@ -83,6 +98,14 @@ def load_module(self, fullname):
module = sys.modules[fullname] = sys.modules[realname]
if '.' not in modname:
setattr(sys.modules[self.wrapper_module], modname, module)
+
+ if realname.startswith('flaskext.'):
+ warnings.warn(
+ "Detected extension named flaskext.{x}, please rename it "
+ "to flask_{x}. The old form is deprecated."
+ .format(x=modname), ExtDeprecationWarning
+ )
+
return module
raise ImportError('No module named %s' % fullname)
@@ -111,7 +134,7 @@ def is_important_frame(self, important_module, tb):
if module_name == important_module:
return True
- # Some python versions will will clean up modules so early that the
+ # Some python versions will clean up modules so early that the
# module name at that point is no longer set. Try guessing from
# the filename then.
filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
diff --git a/app/lib/flask/globals.py b/app/lib/flask/globals.py
new file mode 100644
index 0000000..0b70a3e
--- /dev/null
+++ b/app/lib/flask/globals.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.globals
+ ~~~~~~~~~~~~~
+
+ Defines all the global objects that are proxies to the current
+ active context.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+from functools import partial
+from werkzeug.local import LocalStack, LocalProxy
+
+
+_request_ctx_err_msg = '''\
+Working outside of request context.
+
+This typically means that you attempted to use functionality that needed
+an active HTTP request. Consult the documentation on testing for
+information about how to avoid this problem.\
+'''
+_app_ctx_err_msg = '''\
+Working outside of application context.
+
+This typically means that you attempted to use functionality that needed
+to interface with the current application object in some way. To solve
+this, set up an application context with app.app_context(). See the
+documentation for more information.\
+'''
+
+
+def _lookup_req_object(name):
+ top = _request_ctx_stack.top
+ if top is None:
+ raise RuntimeError(_request_ctx_err_msg)
+ return getattr(top, name)
+
+
+def _lookup_app_object(name):
+ top = _app_ctx_stack.top
+ if top is None:
+ raise RuntimeError(_app_ctx_err_msg)
+ return getattr(top, name)
+
+
+def _find_app():
+ top = _app_ctx_stack.top
+ if top is None:
+ raise RuntimeError(_app_ctx_err_msg)
+ return top.app
+
+
+# context locals
+_request_ctx_stack = LocalStack()
+_app_ctx_stack = LocalStack()
+current_app = LocalProxy(_find_app)
+request = LocalProxy(partial(_lookup_req_object, 'request'))
+session = LocalProxy(partial(_lookup_req_object, 'session'))
+g = LocalProxy(partial(_lookup_app_object, 'g'))
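+
+# Illustrative sketch (not part of the upstream file): the proxies above
+# resolve only while a context is pushed; outside of one they raise the
+# RuntimeError messages defined above.
+def _demo_context_proxies():  # example only; never called on import
+    from flask import Flask
+    app = Flask('demo')
+    try:
+        current_app.name  # no application context pushed yet
+    except RuntimeError as exc:
+        assert 'application context' in str(exc)
+    with app.app_context():
+        assert current_app.name == 'demo'
+        g.answer = 42  # ``g`` is stored on the application context
+        assert g.answer == 42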
diff --git a/app/lib/flask/helpers.py b/app/lib/flask/helpers.py
new file mode 100644
index 0000000..c6c2cdd
--- /dev/null
+++ b/app/lib/flask/helpers.py
@@ -0,0 +1,960 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.helpers
+ ~~~~~~~~~~~~~
+
+ Implements various helpers.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import sys
+import pkgutil
+import posixpath
+import mimetypes
+from time import time
+from zlib import adler32
+from threading import RLock
+from werkzeug.routing import BuildError
+from functools import update_wrapper
+
+try:
+ from werkzeug.urls import url_quote
+except ImportError:
+ from urlparse import quote as url_quote
+
+from werkzeug.datastructures import Headers, Range
+from werkzeug.exceptions import BadRequest, NotFound, \
+ RequestedRangeNotSatisfiable
+
+# this was moved in 0.7
+try:
+ from werkzeug.wsgi import wrap_file
+except ImportError:
+ from werkzeug.utils import wrap_file
+
+from jinja2 import FileSystemLoader
+
+from .signals import message_flashed
+from .globals import session, _request_ctx_stack, _app_ctx_stack, \
+ current_app, request
+from ._compat import string_types, text_type
+
+
+# sentinel
+_missing = object()
+
+
+# what separators does this operating system provide that are not a slash?
+# this is used by the send_from_directory function to ensure that nobody is
+# able to access files from outside the filesystem.
+_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
+ if sep not in (None, '/'))
+
+
+def get_debug_flag(default=None):
+ val = os.environ.get('FLASK_DEBUG')
+ if not val:
+ return default
+ return val not in ('0', 'false', 'no')
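+
+# Illustrative only: an unset or empty FLASK_DEBUG yields ``default``;
+# '0', 'false' and 'no' read as off; any other non-empty value reads as on.
+#
+#   os.environ['FLASK_DEBUG'] = 'no'   # get_debug_flag() -> False
+#   os.environ['FLASK_DEBUG'] = '1'    # get_debug_flag() -> True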
+
+
+def _endpoint_from_view_func(view_func):
+ """Internal helper that returns the default endpoint for a given
+ function. This always is the function name.
+ """
+ assert view_func is not None, 'expected view func if endpoint ' \
+ 'is not provided.'
+ return view_func.__name__
+
+
+def stream_with_context(generator_or_function):
+ """Request contexts disappear when the response is started on the server.
+ This is done for efficiency reasons and to make it less likely to encounter
+ memory leaks with badly written WSGI middlewares. The downside is that if
+ you are using streamed responses, the generator cannot access request bound
+ information any more.
+
+ This function however can help you keep the context around for longer::
+
+ from flask import stream_with_context, request, Response
+
+ @app.route('/stream')
+ def streamed_response():
+ @stream_with_context
+ def generate():
+ yield 'Hello '
+ yield request.args['name']
+ yield '!'
+ return Response(generate())
+
+ Alternatively it can also be used around a specific generator::
+
+ from flask import stream_with_context, request, Response
+
+ @app.route('/stream')
+ def streamed_response():
+ def generate():
+ yield 'Hello '
+ yield request.args['name']
+ yield '!'
+ return Response(stream_with_context(generate()))
+
+ .. versionadded:: 0.9
+ """
+ try:
+ gen = iter(generator_or_function)
+ except TypeError:
+ def decorator(*args, **kwargs):
+ gen = generator_or_function(*args, **kwargs)
+ return stream_with_context(gen)
+ return update_wrapper(decorator, generator_or_function)
+
+ def generator():
+ ctx = _request_ctx_stack.top
+ if ctx is None:
+ raise RuntimeError('Attempted to stream with context but '
+ 'there was no context in the first place to keep around.')
+ with ctx:
+ # Dummy sentinel. Has to be inside the context block or we're
+ # not actually keeping the context around.
+ yield None
+
+ # The try/finally is here so that if someone passes a WSGI level
+ # iterator in we're still running the cleanup logic. Generators
+ # don't need that because they are closed on their destruction
+ # automatically.
+ try:
+ for item in gen:
+ yield item
+ finally:
+ if hasattr(gen, 'close'):
+ gen.close()
+
+ # The trick is to start the generator. Then the code execution runs until
+ # the first dummy None is yielded at which point the context was already
+ # pushed. This item is discarded. Then when the iteration continues the
+ # real generator is executed.
+ wrapped_g = generator()
+ next(wrapped_g)
+ return wrapped_g
+
+
+def make_response(*args):
+ """Sometimes it is necessary to set additional headers in a view. Because
+ views do not have to return response objects but can return a value that
+ is converted into a response object by Flask itself, it becomes tricky to
+ add headers to it. This function can be called instead of using a return
+ and you will get a response object which you can use to attach headers.
+
+ If view looked like this and you want to add a new header::
+
+ def index():
+ return render_template('index.html', foo=42)
+
+ You can now do something like this::
+
+ def index():
+ response = make_response(render_template('index.html', foo=42))
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+ return response
+
+ This function accepts the very same arguments you can return from a
+ view function. This for example creates a response with a 404 error
+ code::
+
+ response = make_response(render_template('not_found.html'), 404)
+
+ The other use case of this function is to force the return value of a
+ view function into a response which is helpful with view
+ decorators::
+
+ response = make_response(view_function())
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+
+ Internally this function does the following things:
+
+    - if no arguments are passed, it creates a new response object
+ - if one argument is passed, :meth:`flask.Flask.make_response`
+ is invoked with it.
+ - if more than one argument is passed, the arguments are passed
+ to the :meth:`flask.Flask.make_response` function as tuple.
+
+ .. versionadded:: 0.6
+ """
+ if not args:
+ return current_app.response_class()
+ if len(args) == 1:
+ args = args[0]
+ return current_app.make_response(args)
+
+
+def url_for(endpoint, **values):
+ """Generates a URL to the given endpoint with the method provided.
+
+ Variable arguments that are unknown to the target endpoint are appended
+ to the generated URL as query arguments. If the value of a query argument
+ is ``None``, the whole pair is skipped. In case blueprints are active
+ you can shortcut references to the same blueprint by prefixing the
+ local endpoint with a dot (``.``).
+
+ This will reference the index function local to the current blueprint::
+
+ url_for('.index')
+
+    For more information, head over to the :ref:`Quickstart <url-building>`.
+
+ To integrate applications, :class:`Flask` has a hook to intercept URL build
+ errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
+ function results in a :exc:`~werkzeug.routing.BuildError` when the current
+ app does not have a URL for the given endpoint and values. When it does, the
+ :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if
+ it is not ``None``, which can return a string to use as the result of
+ `url_for` (instead of `url_for`'s default to raise the
+ :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
+ An example::
+
+ def external_url_handler(error, endpoint, values):
+ "Looks up an external URL when `url_for` cannot build a URL."
+ # This is an example of hooking the build_error_handler.
+ # Here, lookup_url is some utility function you've built
+ # which looks up the endpoint in some external URL registry.
+ url = lookup_url(endpoint, **values)
+ if url is None:
+ # External lookup did not have a URL.
+ # Re-raise the BuildError, in context of original traceback.
+ exc_type, exc_value, tb = sys.exc_info()
+ if exc_value is error:
+ raise exc_type, exc_value, tb
+ else:
+ raise error
+ # url_for will use this result, instead of raising BuildError.
+ return url
+
+ app.url_build_error_handlers.append(external_url_handler)
+
+ Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
+ `endpoint` and `values` are the arguments passed into `url_for`. Note
+ that this is for building URLs outside the current application, and not for
+ handling 404 NotFound errors.
+
+ .. versionadded:: 0.10
+ The `_scheme` parameter was added.
+
+ .. versionadded:: 0.9
+ The `_anchor` and `_method` parameters were added.
+
+ .. versionadded:: 0.9
+ Calls :meth:`Flask.handle_build_error` on
+ :exc:`~werkzeug.routing.BuildError`.
+
+ :param endpoint: the endpoint of the URL (name of the function)
+ :param values: the variable arguments of the URL rule
+ :param _external: if set to ``True``, an absolute URL is generated. Server
+ address can be changed via ``SERVER_NAME`` configuration variable which
+ defaults to `localhost`.
+ :param _scheme: a string specifying the desired URL scheme. The `_external`
+ parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
+ behavior uses the same scheme as the current request, or
+        ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
+ request context is available. As of Werkzeug 0.10, this also can be set
+ to an empty string to build protocol-relative URLs.
+ :param _anchor: if provided this is added as anchor to the URL.
+ :param _method: if provided this explicitly specifies an HTTP method.
+ """
+ appctx = _app_ctx_stack.top
+ reqctx = _request_ctx_stack.top
+ if appctx is None:
+ raise RuntimeError('Attempted to generate a URL without the '
+ 'application context being pushed. This has to be '
+ 'executed when application context is available.')
+
+ # If request specific information is available we have some extra
+ # features that support "relative" URLs.
+ if reqctx is not None:
+ url_adapter = reqctx.url_adapter
+ blueprint_name = request.blueprint
+ if not reqctx.request._is_old_module:
+ if endpoint[:1] == '.':
+ if blueprint_name is not None:
+ endpoint = blueprint_name + endpoint
+ else:
+ endpoint = endpoint[1:]
+ else:
+ # TODO: get rid of this deprecated functionality in 1.0
+ if '.' not in endpoint:
+ if blueprint_name is not None:
+ endpoint = blueprint_name + '.' + endpoint
+ elif endpoint.startswith('.'):
+ endpoint = endpoint[1:]
+ external = values.pop('_external', False)
+
+ # Otherwise go with the url adapter from the appctx and make
+ # the URLs external by default.
+ else:
+ url_adapter = appctx.url_adapter
+ if url_adapter is None:
+ raise RuntimeError('Application was not able to create a URL '
+ 'adapter for request independent URL generation. '
+ 'You might be able to fix this by setting '
+ 'the SERVER_NAME config variable.')
+ external = values.pop('_external', True)
+
+ anchor = values.pop('_anchor', None)
+ method = values.pop('_method', None)
+ scheme = values.pop('_scheme', None)
+ appctx.app.inject_url_defaults(endpoint, values)
+
+ # This is not the best way to deal with this but currently the
+ # underlying Werkzeug router does not support overriding the scheme on
+ # a per build call basis.
+ old_scheme = None
+ if scheme is not None:
+ if not external:
+ raise ValueError('When specifying _scheme, _external must be True')
+ old_scheme = url_adapter.url_scheme
+ url_adapter.url_scheme = scheme
+
+ try:
+ try:
+ rv = url_adapter.build(endpoint, values, method=method,
+ force_external=external)
+ finally:
+ if old_scheme is not None:
+ url_adapter.url_scheme = old_scheme
+ except BuildError as error:
+ # We need to inject the values again so that the app callback can
+ # deal with that sort of stuff.
+ values['_external'] = external
+ values['_anchor'] = anchor
+ values['_method'] = method
+ return appctx.app.handle_url_build_error(error, endpoint, values)
+
+ if anchor is not None:
+ rv += '#' + url_quote(anchor)
+ return rv
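+
+# Illustrative sketch (not part of the upstream file): endpoint names map
+# to URLs, unknown keyword arguments become query parameters, and
+# ``_anchor`` appends a fragment (the route below is made up).
+def _demo_url_for():  # example only; never called on import
+    from flask import Flask
+    app = Flask('demo')
+
+    @app.route('/user/<name>')
+    def profile(name):
+        return name
+
+    with app.test_request_context('/'):
+        assert url_for('profile', name='ada') == '/user/ada'
+        assert url_for('profile', name='ada', page=2) == '/user/ada?page=2'
+        assert url_for('profile', name='ada', _anchor='top') == '/user/ada#top'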
+
+
+def get_template_attribute(template_name, attribute):
+ """Loads a macro (or variable) a template exports. This can be used to
+ invoke a macro from within Python code. If you for example have a
+ template named :file:`_cider.html` with the following contents:
+
+ .. sourcecode:: html+jinja
+
+ {% macro hello(name) %}Hello {{ name }}!{% endmacro %}
+
+ You can access this from Python code like this::
+
+ hello = get_template_attribute('_cider.html', 'hello')
+ return hello('World')
+
+ .. versionadded:: 0.2
+
+ :param template_name: the name of the template
+    :param attribute: the name of the variable or macro to access
+ """
+ return getattr(current_app.jinja_env.get_template(template_name).module,
+ attribute)
+
+
+def flash(message, category='message'):
+ """Flashes a message to the next request. In order to remove the
+ flashed message from the session and to display it to the user,
+ the template has to call :func:`get_flashed_messages`.
+
+ .. versionchanged:: 0.3
+ `category` parameter added.
+
+ :param message: the message to be flashed.
+ :param category: the category for the message. The following values
+ are recommended: ``'message'`` for any kind of message,
+ ``'error'`` for errors, ``'info'`` for information
+ messages and ``'warning'`` for warnings. However any
+ kind of string can be used as category.
+ """
+ # Original implementation:
+ #
+ # session.setdefault('_flashes', []).append((category, message))
+ #
+    # This assumed that changes made to mutable structures in the session
+    # are always in sync with the session object, which is not true for session
+ # implementations that use external storage for keeping their keys/values.
+ flashes = session.get('_flashes', [])
+ flashes.append((category, message))
+ session['_flashes'] = flashes
+ message_flashed.send(current_app._get_current_object(),
+ message=message, category=category)
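+
+# Illustrative sketch (not part of the upstream file): why ``flash``
+# reassigns ``session['_flashes']`` instead of mutating it in place.  The
+# hypothetical store below only notices writes made through __setitem__,
+# like session backends that persist to external storage on assignment.
+def _demo_flash_reassignment():  # example only; never called on import
+    class ExternalStoreSession(dict):
+        modified = False
+
+        def __setitem__(self, key, value):
+            self.modified = True
+            dict.__setitem__(self, key, value)
+
+    session = ExternalStoreSession()
+    session.setdefault('_flashes', []).append(('message', 'hi'))
+    assert not session.modified           # the in-place change went unseen
+    flashes = session.get('_flashes', [])
+    flashes.append(('message', 'ho'))
+    session['_flashes'] = flashes         # explicit assignment is noticed
+    assert session.modified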
+
+
+def get_flashed_messages(with_categories=False, category_filter=[]):
+ """Pulls all flashed messages from the session and returns them.
+ Further calls in the same request to the function will return
+ the same messages. By default just the messages are returned,
+ but when `with_categories` is set to ``True``, the return value will
+ be a list of tuples in the form ``(category, message)`` instead.
+
+ Filter the flashed messages to one or more categories by providing those
+ categories in `category_filter`. This allows rendering categories in
+ separate html blocks. The `with_categories` and `category_filter`
+ arguments are distinct:
+
+ * `with_categories` controls whether categories are returned with message
+ text (``True`` gives a tuple, where ``False`` gives just the message text).
+ * `category_filter` filters the messages down to only those matching the
+ provided categories.
+
+ See :ref:`message-flashing-pattern` for examples.
+
+ .. versionchanged:: 0.3
+ `with_categories` parameter added.
+
+ .. versionchanged:: 0.9
+ `category_filter` parameter added.
+
+ :param with_categories: set to ``True`` to also receive categories.
+ :param category_filter: whitelist of categories to limit return values
+ """
+ flashes = _request_ctx_stack.top.flashes
+ if flashes is None:
+ _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
+ if '_flashes' in session else []
+ if category_filter:
+ flashes = list(filter(lambda f: f[0] in category_filter, flashes))
+ if not with_categories:
+ return [x[1] for x in flashes]
+ return flashes
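+
+# Illustrative template usage (Jinja), matching the docstring above:
+#
+#   {% for category, message in get_flashed_messages(with_categories=true) %}
+#     <div class="flash {{ category }}">{{ message }}</div>
+#   {% endfor %}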
+
+
+def send_file(filename_or_fp, mimetype=None, as_attachment=False,
+ attachment_filename=None, add_etags=True,
+ cache_timeout=None, conditional=False, last_modified=None):
+ """Sends the contents of a file to the client. This will use the
+ most efficient method available and configured. By default it will
+ try to use the WSGI server's file_wrapper support. Alternatively
+ you can set the application's :attr:`~Flask.use_x_sendfile` attribute
+ to ``True`` to directly emit an ``X-Sendfile`` header. This however
+ requires support of the underlying webserver for ``X-Sendfile``.
+
+ By default it will try to guess the mimetype for you, but you can
+ also explicitly provide one. For extra security you probably want
+ to send certain files as attachment (HTML for instance). The mimetype
+ guessing requires a `filename` or an `attachment_filename` to be
+ provided.
+
+ ETags will also be attached automatically if a `filename` is provided. You
+ can turn this off by setting `add_etags=False`.
+
+ If `conditional=True` and `filename` is provided, this method will try to
+ upgrade the response stream to support range requests. This will allow
+    the request to be answered with a partial content response.
+
+ Please never pass filenames to this function from user sources;
+ you should use :func:`send_from_directory` instead.
+
+ .. versionadded:: 0.2
+
+ .. versionadded:: 0.5
+ The `add_etags`, `cache_timeout` and `conditional` parameters were
+ added. The default behavior is now to attach etags.
+
+ .. versionchanged:: 0.7
+ mimetype guessing and etag support for file objects was
+ deprecated because it was unreliable. Pass a filename if you are
+ able to, otherwise attach an etag yourself. This functionality
+ will be removed in Flask 1.0
+
+ .. versionchanged:: 0.9
+ cache_timeout pulls its default from application config, when None.
+
+ .. versionchanged:: 0.12
+ The filename is no longer automatically inferred from file objects. If
+ you want to use automatic mimetype and etag support, pass a filepath via
+ `filename_or_fp` or `attachment_filename`.
+
+ .. versionchanged:: 0.12
+ The `attachment_filename` is preferred over `filename` for MIME-type
+ detection.
+
+ :param filename_or_fp: the filename of the file to send in `latin-1`.
+ This is relative to the :attr:`~Flask.root_path`
+ if a relative path is specified.
+ Alternatively a file object might be provided in
+ which case ``X-Sendfile`` might not work and fall
+ back to the traditional method. Make sure that the
+ file pointer is positioned at the start of data to
+ send before calling :func:`send_file`.
+ :param mimetype: the mimetype of the file if provided. If a file path is
+ given, auto detection happens as fallback, otherwise an
+ error will be raised.
+ :param as_attachment: set to ``True`` if you want to send this file with
+ a ``Content-Disposition: attachment`` header.
+ :param attachment_filename: the filename for the attachment if it
+ differs from the file's filename.
+ :param add_etags: set to ``False`` to disable attaching of etags.
+ :param conditional: set to ``True`` to enable conditional responses.
+
+ :param cache_timeout: the timeout in seconds for the headers. When ``None``
+ (default), this value is set by
+ :meth:`~Flask.get_send_file_max_age` of
+ :data:`~flask.current_app`.
+ :param last_modified: set the ``Last-Modified`` header to this value,
+ a :class:`~datetime.datetime` or timestamp.
+ If a file was passed, this overrides its mtime.
+ """
+ mtime = None
+ fsize = None
+ if isinstance(filename_or_fp, string_types):
+ filename = filename_or_fp
+ if not os.path.isabs(filename):
+ filename = os.path.join(current_app.root_path, filename)
+ file = None
+ if attachment_filename is None:
+ attachment_filename = os.path.basename(filename)
+ else:
+ file = filename_or_fp
+ filename = None
+
+ if mimetype is None:
+ if attachment_filename is not None:
+ mimetype = mimetypes.guess_type(attachment_filename)[0] \
+ or 'application/octet-stream'
+
+ if mimetype is None:
+ raise ValueError(
+ 'Unable to infer MIME-type because no filename is available. '
+ 'Please set either `attachment_filename`, pass a filepath to '
+ '`filename_or_fp` or set your own MIME-type via `mimetype`.'
+ )
+
+ headers = Headers()
+ if as_attachment:
+ if attachment_filename is None:
+ raise TypeError('filename unavailable, required for '
+ 'sending as attachment')
+ headers.add('Content-Disposition', 'attachment',
+ filename=attachment_filename)
+
+ if current_app.use_x_sendfile and filename:
+ if file is not None:
+ file.close()
+ headers['X-Sendfile'] = filename
+ fsize = os.path.getsize(filename)
+ headers['Content-Length'] = fsize
+ data = None
+ else:
+ if file is None:
+ file = open(filename, 'rb')
+ mtime = os.path.getmtime(filename)
+ fsize = os.path.getsize(filename)
+ headers['Content-Length'] = fsize
+ data = wrap_file(request.environ, file)
+
+ rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
+ direct_passthrough=True)
+
+ if last_modified is not None:
+ rv.last_modified = last_modified
+ elif mtime is not None:
+ rv.last_modified = mtime
+
+ rv.cache_control.public = True
+ if cache_timeout is None:
+ cache_timeout = current_app.get_send_file_max_age(filename)
+ if cache_timeout is not None:
+ rv.cache_control.max_age = cache_timeout
+ rv.expires = int(time() + cache_timeout)
+
+ if add_etags and filename is not None:
+ from warnings import warn
+
+ try:
+ rv.set_etag('%s-%s-%s' % (
+ os.path.getmtime(filename),
+ os.path.getsize(filename),
+ adler32(
+ filename.encode('utf-8') if isinstance(filename, text_type)
+ else filename
+ ) & 0xffffffff
+ ))
+ except OSError:
+ warn('Access %s failed, maybe it does not exist, so ignore etags in '
+ 'headers' % filename, stacklevel=2)
+
+ if conditional:
+ if callable(getattr(Range, 'to_content_range_header', None)):
+ # Werkzeug supports Range Requests
+ # Remove this test when support for Werkzeug <0.12 is dropped
+ try:
+ rv = rv.make_conditional(request, accept_ranges=True,
+ complete_length=fsize)
+ except RequestedRangeNotSatisfiable:
+ file.close()
+ raise
+ else:
+ rv = rv.make_conditional(request)
+ # make sure we don't send x-sendfile for servers that
+ # ignore the 304 status code for x-sendfile.
+ if rv.status_code == 304:
+ rv.headers.pop('x-sendfile', None)
+ return rv
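+
+# Illustrative usage in a view (the route and file path are made up):
+#
+#   @app.route('/report')
+#   def report():
+#       return send_file('static/report.pdf', as_attachment=True,
+#                        attachment_filename='report.pdf',
+#                        conditional=True)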
+
+
+def safe_join(directory, *pathnames):
+ """Safely join `directory` and zero or more untrusted `pathnames`
+ components.
+
+ Example usage::
+
+        @app.route('/wiki/<path:filename>')
+ def wiki_page(filename):
+ filename = safe_join(app.config['WIKI_FOLDER'], filename)
+ with open(filename, 'rb') as fd:
+ content = fd.read() # Read and process the file content...
+
+ :param directory: the trusted base directory.
+ :param pathnames: the untrusted pathnames relative to that directory.
+ :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed
+ paths fall out of its boundaries.
+ """
+ for filename in pathnames:
+ if filename != '':
+ filename = posixpath.normpath(filename)
+ for sep in _os_alt_seps:
+ if sep in filename:
+ raise NotFound()
+ if os.path.isabs(filename) or \
+ filename == '..' or \
+ filename.startswith('../'):
+ raise NotFound()
+ directory = os.path.join(directory, filename)
+ return directory
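+
+# Illustrative sketch (not part of the upstream file): on a POSIX system,
+# well-formed components join normally while traversal attempts raise
+# :exc:`~werkzeug.exceptions.NotFound`.
+def _demo_safe_join():  # example only; never called on import
+    assert safe_join('/var/www', 'wiki', 'index.html') == \
+        '/var/www/wiki/index.html'
+    for evil in ('../etc/passwd', '/etc/passwd', '..'):
+        try:
+            safe_join('/var/www', evil)
+        except NotFound:
+            pass
+        else:
+            raise AssertionError('expected NotFound for %r' % evil)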
+
+
+def send_from_directory(directory, filename, **options):
+ """Send a file from a given directory with :func:`send_file`. This
+ is a secure way to quickly expose static files from an upload folder
+ or something similar.
+
+ Example usage::
+
+        @app.route('/uploads/<path:filename>')
+ def download_file(filename):
+ return send_from_directory(app.config['UPLOAD_FOLDER'],
+ filename, as_attachment=True)
+
+ .. admonition:: Sending files and Performance
+
+ It is strongly recommended to activate either ``X-Sendfile`` support in
+ your webserver or (if no authentication happens) to tell the webserver
+ to serve files for the given path on its own without calling into the
+ web application for improved performance.
+
+ .. versionadded:: 0.5
+
+ :param directory: the directory where all the files are stored.
+ :param filename: the filename relative to that directory to
+ download.
+ :param options: optional keyword arguments that are directly
+ forwarded to :func:`send_file`.
+ """
+ filename = safe_join(directory, filename)
+ if not os.path.isabs(filename):
+ filename = os.path.join(current_app.root_path, filename)
+ try:
+ if not os.path.isfile(filename):
+ raise NotFound()
+ except (TypeError, ValueError):
+ raise BadRequest()
+ options.setdefault('conditional', True)
+ return send_file(filename, **options)
+
+
+def get_root_path(import_name):
+ """Returns the path to a package or cwd if that cannot be found. This
+ returns the path of a package or the folder that contains a module.
+
+ Not to be confused with the package path returned by :func:`find_package`.
+ """
+ # Module already imported and has a file attribute. Use that first.
+ mod = sys.modules.get(import_name)
+ if mod is not None and hasattr(mod, '__file__'):
+ return os.path.dirname(os.path.abspath(mod.__file__))
+
+ # Next attempt: check the loader.
+ loader = pkgutil.get_loader(import_name)
+
+ # Loader does not exist or we're referring to an unloaded main module
+ # or a main module without path (interactive sessions), go with the
+ # current working directory.
+ if loader is None or import_name == '__main__':
+ return os.getcwd()
+
+ # For .egg, zipimporter does not have get_filename until Python 2.7.
+ # Some other loaders might exhibit the same behavior.
+ if hasattr(loader, 'get_filename'):
+ filepath = loader.get_filename(import_name)
+ else:
+ # Fall back to imports.
+ __import__(import_name)
+ mod = sys.modules[import_name]
+ filepath = getattr(mod, '__file__', None)
+
+ # If we don't have a filepath it might be because we are a
+ # namespace package. In this case we pick the root path from the
+ # first module that is contained in our package.
+ if filepath is None:
+ raise RuntimeError('No root path can be found for the provided '
+ 'module "%s". This can happen because the '
+ 'module came from an import hook that does '
+ 'not provide file name information or because '
+ 'it\'s a namespace package. In this case '
+ 'the root path needs to be explicitly '
+ 'provided.' % import_name)
+
+ # filepath is import_name.py for a module, or __init__.py for a package.
+ return os.path.dirname(os.path.abspath(filepath))
+
+
+def _matching_loader_thinks_module_is_package(loader, mod_name):
+ """Given the loader that loaded a module and the module this function
+ attempts to figure out if the given module is actually a package.
+ """
+ # If the loader can tell us if something is a package, we can
+ # directly ask the loader.
+ if hasattr(loader, 'is_package'):
+ return loader.is_package(mod_name)
+ # importlib's namespace loaders do not have this functionality but
+ # all the modules it loads are packages, so we can take advantage of
+ # this information.
+ elif (loader.__class__.__module__ == '_frozen_importlib' and
+ loader.__class__.__name__ == 'NamespaceLoader'):
+ return True
+ # Otherwise we need to fail with an error that explains what went
+ # wrong.
+ raise AttributeError(
+        ('%s.is_package() method is missing but is required by Flask for '
+ 'PEP 302 import hooks. If you do not use import hooks and '
+ 'you encounter this error please file a bug against Flask.') %
+ loader.__class__.__name__)
+
+
+def find_package(import_name):
+ """Finds a package and returns the prefix (or None if the package is
+ not installed) as well as the folder that contains the package or
+ module as a tuple. The package path returned is the module that would
+ have to be added to the pythonpath in order to make it possible to
+ import the module. The prefix is the path below which a UNIX like
+ folder structure exists (lib, share etc.).
+ """
+ root_mod_name = import_name.split('.')[0]
+ loader = pkgutil.get_loader(root_mod_name)
+ if loader is None or import_name == '__main__':
+ # import name is not found, or interactive/main module
+ package_path = os.getcwd()
+ else:
+ # For .egg, zipimporter does not have get_filename until Python 2.7.
+ if hasattr(loader, 'get_filename'):
+ filename = loader.get_filename(root_mod_name)
+ elif hasattr(loader, 'archive'):
+            # zipimporter's loader.archive points to the .egg or .zip file;
+            # the archive filename is dropped in the dirname call below.
+ filename = loader.archive
+ else:
+ # At least one loader is missing both get_filename and archive:
+ # Google App Engine's HardenedModulesHook
+ #
+ # Fall back to imports.
+ __import__(import_name)
+ filename = sys.modules[import_name].__file__
+ package_path = os.path.abspath(os.path.dirname(filename))
+
+    # In case the root module is a package we need to chop off the
+ # rightmost part. This needs to go through a helper function
+ # because of python 3.3 namespace packages.
+ if _matching_loader_thinks_module_is_package(
+ loader, root_mod_name):
+ package_path = os.path.dirname(package_path)
+
+ site_parent, site_folder = os.path.split(package_path)
+ py_prefix = os.path.abspath(sys.prefix)
+ if package_path.startswith(py_prefix):
+ return py_prefix, package_path
+ elif site_folder.lower() == 'site-packages':
+ parent, folder = os.path.split(site_parent)
+ # Windows like installations
+ if folder.lower() == 'lib':
+ base_dir = parent
+ # UNIX like installations
+ elif os.path.basename(parent).lower() == 'lib':
+ base_dir = os.path.dirname(parent)
+ else:
+ base_dir = site_parent
+ return base_dir, package_path
+ return None, package_path
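+
+# Illustrative only (hypothetical paths): for a package installed into a
+# virtualenv at /venv/lib/python2.7/site-packages/flask, the "UNIX like"
+# branch above gives
+#
+#   find_package('flask') == ('/venv', '/venv/lib/python2.7/site-packages')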
+
+
+class locked_cached_property(object):
+ """A decorator that converts a function into a lazy property. The
+ function wrapped is called the first time to retrieve the result
+ and then that calculated result is used the next time you access
+ the value. Works like the one in Werkzeug but has a lock for
+ thread safety.
+ """
+
+ def __init__(self, func, name=None, doc=None):
+ self.__name__ = name or func.__name__
+ self.__module__ = func.__module__
+ self.__doc__ = doc or func.__doc__
+ self.func = func
+ self.lock = RLock()
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ return self
+ with self.lock:
+ value = obj.__dict__.get(self.__name__, _missing)
+ if value is _missing:
+ value = self.func(obj)
+ obj.__dict__[self.__name__] = value
+ return value
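+
+# Illustrative sketch (not part of the upstream file): the wrapped
+# function runs once per instance; later reads hit the value cached in
+# the instance ``__dict__``.
+def _demo_locked_cached_property():  # example only; never called on import
+    calls = []
+
+    class Thing(object):
+        @locked_cached_property
+        def value(self):
+            calls.append(1)
+            return 42
+
+    thing = Thing()
+    assert thing.value == 42
+    assert thing.value == 42
+    assert len(calls) == 1  # the second read never invoked the function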
+
+
+class _PackageBoundObject(object):
+
+ def __init__(self, import_name, template_folder=None, root_path=None):
+ #: The name of the package or module. Do not change this once
+ #: it was set by the constructor.
+ self.import_name = import_name
+
+ #: location of the templates. ``None`` if templates should not be
+ #: exposed.
+ self.template_folder = template_folder
+
+ if root_path is None:
+ root_path = get_root_path(self.import_name)
+
+ #: Where is the app root located?
+ self.root_path = root_path
+
+ self._static_folder = None
+ self._static_url_path = None
+
+ def _get_static_folder(self):
+ if self._static_folder is not None:
+ return os.path.join(self.root_path, self._static_folder)
+ def _set_static_folder(self, value):
+ self._static_folder = value
+ static_folder = property(_get_static_folder, _set_static_folder, doc='''
+ The absolute path to the configured static folder.
+ ''')
+ del _get_static_folder, _set_static_folder
+
+ def _get_static_url_path(self):
+ if self._static_url_path is not None:
+ return self._static_url_path
+ if self.static_folder is not None:
+ return '/' + os.path.basename(self.static_folder)
+ def _set_static_url_path(self, value):
+ self._static_url_path = value
+ static_url_path = property(_get_static_url_path, _set_static_url_path)
+ del _get_static_url_path, _set_static_url_path
+
+ @property
+ def has_static_folder(self):
+ """This is ``True`` if the package bound object's container has a
+ folder for static files.
+
+ .. versionadded:: 0.5
+ """
+ return self.static_folder is not None
+
+ @locked_cached_property
+ def jinja_loader(self):
+ """The Jinja loader for this package bound object.
+
+ .. versionadded:: 0.5
+ """
+ if self.template_folder is not None:
+ return FileSystemLoader(os.path.join(self.root_path,
+ self.template_folder))
+
+ def get_send_file_max_age(self, filename):
+ """Provides default cache_timeout for the :func:`send_file` functions.
+
+ By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
+ the configuration of :data:`~flask.current_app`.
+
+ Static file functions such as :func:`send_from_directory` use this
+ function, and :func:`send_file` calls this function on
+ :data:`~flask.current_app` when the given cache_timeout is ``None``. If a
+ cache_timeout is given in :func:`send_file`, that timeout is used;
+ otherwise, this method is called.
+
+ This allows subclasses to change the behavior when sending files based
+ on the filename. For example, to set the cache timeout for .js files
+ to 60 seconds::
+
+ class MyFlask(flask.Flask):
+ def get_send_file_max_age(self, name):
+ if name.lower().endswith('.js'):
+ return 60
+ return flask.Flask.get_send_file_max_age(self, name)
+
+ .. versionadded:: 0.9
+ """
+ return total_seconds(current_app.send_file_max_age_default)
+
+ def send_static_file(self, filename):
+ """Function used internally to send static files from the static
+ folder to the browser.
+
+ .. versionadded:: 0.5
+ """
+ if not self.has_static_folder:
+ raise RuntimeError('No static folder for this object')
+ # Ensure get_send_file_max_age is called in all cases.
+ # Here, we ensure get_send_file_max_age is called for Blueprints.
+ cache_timeout = self.get_send_file_max_age(filename)
+ return send_from_directory(self.static_folder, filename,
+ cache_timeout=cache_timeout)
+
+ def open_resource(self, resource, mode='rb'):
+ """Opens a resource from the application's resource folder. To see
+ how this works, consider the following folder structure::
+
+ /myapplication.py
+ /schema.sql
+ /static
+ /style.css
+ /templates
+ /layout.html
+ /index.html
+
+ If you want to open the :file:`schema.sql` file you would do the
+ following::
+
+ with app.open_resource('schema.sql') as f:
+ contents = f.read()
+ do_something_with(contents)
+
+ :param resource: the name of the resource. To access resources within
+ subfolders use forward slashes as separator.
+ :param mode: resource file opening mode, default is 'rb'.
+ """
+ if mode not in ('r', 'rb'):
+ raise ValueError('Resources can only be opened for reading')
+ return open(os.path.join(self.root_path, resource), mode)
+
+
+def total_seconds(td):
+ """Returns the total seconds from a timedelta object.
+
+    :param timedelta td: the timedelta to be converted to seconds
+
+ :returns: number of seconds
+ :rtype: int
+ """
+ return td.days * 60 * 60 * 24 + td.seconds
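+
+# Illustrative check: timedelta(days=1, seconds=30) gives
+# 1 * 24 * 60 * 60 + 30 == 86430.  Microseconds are deliberately ignored,
+# matching the documented ``int`` return type.
+#
+#   from datetime import timedelta
+#   assert total_seconds(timedelta(days=1, seconds=30)) == 86430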
diff --git a/app/lib/flask/json.py b/app/lib/flask/json.py
new file mode 100644
index 0000000..16e0c29
--- /dev/null
+++ b/app/lib/flask/json.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+"""
+ flask.jsonimpl
+ ~~~~~~~~~~~~~~
+
+ Implementation helpers for the JSON support in Flask.
+
+ :copyright: (c) 2015 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import io
+import uuid
+from datetime import date
+from .globals import current_app, request
+from ._compat import text_type, PY2
+
+from werkzeug.http import http_date
+from jinja2 import Markup
+
+# Use the same json implementation as itsdangerous on which we
+# depend anyways.
+from itsdangerous import json as _json
+
+
+# Figure out if simplejson escapes slashes. This behavior was changed
+# from one version to another without reason.
+_slash_escape = '\\/' not in _json.dumps('/')
+
+
+__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
+ 'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
+ 'jsonify']
+
+
+def _wrap_reader_for_text(fp, encoding):
+ if isinstance(fp.read(0), bytes):
+ fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
+ return fp
+
+
+def _wrap_writer_for_text(fp, encoding):
+ try:
+ fp.write('')
+ except TypeError:
+ fp = io.TextIOWrapper(fp, encoding)
+ return fp
+
+
+class JSONEncoder(_json.JSONEncoder):
+ """The default Flask JSON encoder. This one extends the default simplejson
+ encoder by also supporting ``datetime`` objects, ``UUID`` as well as
+ ``Markup`` objects which are serialized as RFC 822 datetime strings (same
+ as the HTTP date format). In order to support more data types override the
+ :meth:`default` method.
+ """
+
+ def default(self, o):
+ """Implement this method in a subclass such that it returns a
+ serializable object for ``o``, or calls the base implementation (to
+ raise a :exc:`TypeError`).
+
+ For example, to support arbitrary iterators, you could implement
+ default like this::
+
+ def default(self, o):
+ try:
+ iterable = iter(o)
+ except TypeError:
+ pass
+ else:
+ return list(iterable)
+ return JSONEncoder.default(self, o)
+ """
+ if isinstance(o, date):
+ return http_date(o.timetuple())
+ if isinstance(o, uuid.UUID):
+ return str(o)
+ if hasattr(o, '__html__'):
+ return text_type(o.__html__())
+ return _json.JSONEncoder.default(self, o)
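+
+# Illustrative sketch (not part of the upstream file): dates become RFC 822
+# strings via ``http_date`` and UUIDs their canonical string form.
+def _demo_default_encoder():  # example only; never called on import
+    enc = JSONEncoder()
+    assert enc.encode(uuid.UUID(int=0)) == \
+        '"00000000-0000-0000-0000-000000000000"'
+    assert enc.encode(date(2015, 1, 2)) == '"Fri, 02 Jan 2015 00:00:00 GMT"'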
+
+
+class JSONDecoder(_json.JSONDecoder):
+ """The default JSON decoder. This one does not change the behavior from
+ the default simplejson decoder. Consult the :mod:`json` documentation
+ for more information. This decoder is not only used for the load
+ functions of this module but also :attr:`~flask.Request`.
+ """
+
+
+def _dump_arg_defaults(kwargs):
+ """Inject default arguments for dump functions."""
+ if current_app:
+ kwargs.setdefault('cls', current_app.json_encoder)
+ if not current_app.config['JSON_AS_ASCII']:
+ kwargs.setdefault('ensure_ascii', False)
+ kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
+ else:
+ kwargs.setdefault('sort_keys', True)
+ kwargs.setdefault('cls', JSONEncoder)
+
+
+def _load_arg_defaults(kwargs):
+ """Inject default arguments for load functions."""
+ if current_app:
+ kwargs.setdefault('cls', current_app.json_decoder)
+ else:
+ kwargs.setdefault('cls', JSONDecoder)
+
+
+def dumps(obj, **kwargs):
+ """Serialize ``obj`` to a JSON formatted ``str`` by using the application's
+ configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
+ application on the stack.
+
+ This function can return ``unicode`` strings or ascii-only bytestrings by
+ default which coerce into unicode strings automatically. That behavior by
+ default is controlled by the ``JSON_AS_ASCII`` configuration variable
+ and can be overridden by the simplejson ``ensure_ascii`` parameter.
+ """
+ _dump_arg_defaults(kwargs)
+ encoding = kwargs.pop('encoding', None)
+ rv = _json.dumps(obj, **kwargs)
+ if encoding is not None and isinstance(rv, text_type):
+ rv = rv.encode(encoding)
+ return rv
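+
+# Illustrative only: without an application on the stack,
+# ``_dump_arg_defaults`` above falls back to ``sort_keys=True`` and the
+# plain :class:`JSONEncoder`, so key order is deterministic:
+#
+#   dumps({'b': 1, 'a': 2}) == '{"a": 2, "b": 1}'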
+
+
+def dump(obj, fp, **kwargs):
+ """Like :func:`dumps` but writes into a file object."""
+ _dump_arg_defaults(kwargs)
+ encoding = kwargs.pop('encoding', None)
+ if encoding is not None:
+ fp = _wrap_writer_for_text(fp, encoding)
+ _json.dump(obj, fp, **kwargs)
+
+
+def loads(s, **kwargs):
+ """Unserialize a JSON object from a string ``s`` by using the application's
+ configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
+ application on the stack.
+ """
+ _load_arg_defaults(kwargs)
+ if isinstance(s, bytes):
+ s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
+ return _json.loads(s, **kwargs)
+
+
+def load(fp, **kwargs):
+ """Like :func:`loads` but reads from a file object.
+ """
+ _load_arg_defaults(kwargs)
+ if not PY2:
+ fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
+ return _json.load(fp, **kwargs)
+
+
+def htmlsafe_dumps(obj, **kwargs):
+    """Works exactly like :func:`dumps` but is safe for use in ``<script>``
+    tags.  It accepts the same arguments and returns a JSON string.  Note that
+    this is available in templates through the ``|tojson`` filter which will
+    also mark the result as safe.
+    """
+    rv = dumps(obj, **kwargs) \
+        .replace(u'<', u'\\u003c') \
+        .replace(u'>', u'\\u003e') \
+        .replace(u'&', u'\\u0026') \
+        .replace(u"'", u'\\u0027')
+    if not _slash_escape:
+        rv = rv.replace('\\/', '/')
+    return rv
diff --git a/app/lib/simplejson/tests/test_errors.py b/app/lib/simplejson/tests/test_errors.py
new file mode 100644
index 0000000..8dede38
--- /dev/null
+++ b/app/lib/simplejson/tests/test_errors.py
@@ -0,0 +1,51 @@
+import sys, pickle
+from unittest import TestCase
+
+import simplejson as json
+from simplejson.compat import u, b
+
+class TestErrors(TestCase):
+ def test_string_keys_error(self):
+ data = [{'a': 'A', 'b': (2, 4), 'c': 3.0, ('d',): 'D tuple'}]
+ self.assertRaises(TypeError, json.dumps, data)
+
+ def test_decode_error(self):
+ err = None
+ try:
+ json.loads('{}\na\nb')
+ except json.JSONDecodeError:
+ err = sys.exc_info()[1]
+ else:
+ self.fail('Expected JSONDecodeError')
+ self.assertEqual(err.lineno, 2)
+ self.assertEqual(err.colno, 1)
+ self.assertEqual(err.endlineno, 3)
+ self.assertEqual(err.endcolno, 2)
+
+ def test_scan_error(self):
+ err = None
+ for t in (u, b):
+ try:
+ json.loads(t('{"asdf": "'))
+ except json.JSONDecodeError:
+ err = sys.exc_info()[1]
+ else:
+ self.fail('Expected JSONDecodeError')
+ self.assertEqual(err.lineno, 1)
+ self.assertEqual(err.colno, 10)
+
+ def test_error_is_pickable(self):
+ err = None
+ try:
+ json.loads('{}\na\nb')
+ except json.JSONDecodeError:
+ err = sys.exc_info()[1]
+ else:
+ self.fail('Expected JSONDecodeError')
+ s = pickle.dumps(err)
+ e = pickle.loads(s)
+
+ self.assertEqual(err.msg, e.msg)
+ self.assertEqual(err.doc, e.doc)
+ self.assertEqual(err.pos, e.pos)
+ self.assertEqual(err.end, e.end)
diff --git a/app/lib/simplejson/tests/test_fail.py b/app/lib/simplejson/tests/test_fail.py
new file mode 100644
index 0000000..788f3a5
--- /dev/null
+++ b/app/lib/simplejson/tests/test_fail.py
@@ -0,0 +1,176 @@
+import sys
+from unittest import TestCase
+
+import simplejson as json
+
+# 2007-10-05
+JSONDOCS = [
+ # http://json.org/JSON_checker/test/fail1.json
+ '"A JSON payload should be an object or array, not a string."',
+ # http://json.org/JSON_checker/test/fail2.json
+ '["Unclosed array"',
+ # http://json.org/JSON_checker/test/fail3.json
+ '{unquoted_key: "keys must be quoted"}',
+ # http://json.org/JSON_checker/test/fail4.json
+ '["extra comma",]',
+ # http://json.org/JSON_checker/test/fail5.json
+ '["double extra comma",,]',
+ # http://json.org/JSON_checker/test/fail6.json
+ '[ , "<-- missing value"]',
+ # http://json.org/JSON_checker/test/fail7.json
+ '["Comma after the close"],',
+ # http://json.org/JSON_checker/test/fail8.json
+ '["Extra close"]]',
+ # http://json.org/JSON_checker/test/fail9.json
+ '{"Extra comma": true,}',
+ # http://json.org/JSON_checker/test/fail10.json
+ '{"Extra value after close": true} "misplaced quoted value"',
+ # http://json.org/JSON_checker/test/fail11.json
+ '{"Illegal expression": 1 + 2}',
+ # http://json.org/JSON_checker/test/fail12.json
+ '{"Illegal invocation": alert()}',
+ # http://json.org/JSON_checker/test/fail13.json
+ '{"Numbers cannot have leading zeroes": 013}',
+ # http://json.org/JSON_checker/test/fail14.json
+ '{"Numbers cannot be hex": 0x14}',
+ # http://json.org/JSON_checker/test/fail15.json
+ '["Illegal backslash escape: \\x15"]',
+ # http://json.org/JSON_checker/test/fail16.json
+ '[\\naked]',
+ # http://json.org/JSON_checker/test/fail17.json
+ '["Illegal backslash escape: \\017"]',
+ # http://json.org/JSON_checker/test/fail18.json
+ '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
+ # http://json.org/JSON_checker/test/fail19.json
+ '{"Missing colon" null}',
+ # http://json.org/JSON_checker/test/fail20.json
+ '{"Double colon":: null}',
+ # http://json.org/JSON_checker/test/fail21.json
+ '{"Comma instead of colon", null}',
+ # http://json.org/JSON_checker/test/fail22.json
+ '["Colon instead of comma": false]',
+ # http://json.org/JSON_checker/test/fail23.json
+ '["Bad value", truth]',
+ # http://json.org/JSON_checker/test/fail24.json
+ "['single quote']",
+ # http://json.org/JSON_checker/test/fail25.json
+ '["\ttab\tcharacter\tin\tstring\t"]',
+ # http://json.org/JSON_checker/test/fail26.json
+ '["tab\\ character\\ in\\ string\\ "]',
+ # http://json.org/JSON_checker/test/fail27.json
+ '["line\nbreak"]',
+ # http://json.org/JSON_checker/test/fail28.json
+ '["line\\\nbreak"]',
+ # http://json.org/JSON_checker/test/fail29.json
+ '[0e]',
+ # http://json.org/JSON_checker/test/fail30.json
+ '[0e+]',
+ # http://json.org/JSON_checker/test/fail31.json
+ '[0e+-1]',
+ # http://json.org/JSON_checker/test/fail32.json
+ '{"Comma instead if closing brace": true,',
+ # http://json.org/JSON_checker/test/fail33.json
+ '["mismatch"}',
+ # http://code.google.com/p/simplejson/issues/detail?id=3
+ u'["A\u001FZ control characters in string"]',
+ # misc based on coverage
+ '{',
+ '{]',
+ '{"foo": "bar"]',
+ '{"foo": "bar"',
+ 'nul',
+ 'nulx',
+ '-',
+ '-x',
+ '-e',
+ '-e0',
+ '-Infinite',
+ '-Inf',
+ 'Infinit',
+ 'Infinite',
+ 'NaM',
+ 'NuN',
+ 'falsy',
+ 'fal',
+ 'trug',
+ 'tru',
+ '1e',
+ '1ex',
+ '1e-',
+ '1e-x',
+]
+
+SKIPS = {
+ 1: "why not have a string payload?",
+ 18: "spec doesn't specify any nesting limitations",
+}
+
+class TestFail(TestCase):
+ def test_failures(self):
+ for idx, doc in enumerate(JSONDOCS):
+ idx = idx + 1
+ if idx in SKIPS:
+ json.loads(doc)
+ continue
+ try:
+ json.loads(doc)
+ except json.JSONDecodeError:
+ pass
+ else:
+ self.fail("Expected failure for fail%d.json: %r" % (idx, doc))
+
+ def test_array_decoder_issue46(self):
+ # http://code.google.com/p/simplejson/issues/detail?id=46
+ for doc in [u'[,]', '[,]']:
+ try:
+ json.loads(doc)
+ except json.JSONDecodeError:
+ e = sys.exc_info()[1]
+ self.assertEqual(e.pos, 1)
+ self.assertEqual(e.lineno, 1)
+ self.assertEqual(e.colno, 2)
+ except Exception:
+ e = sys.exc_info()[1]
+ self.fail("Unexpected exception raised %r %s" % (e, e))
+ else:
+ self.fail("Unexpected success parsing '[,]'")
+
+ def test_truncated_input(self):
+ test_cases = [
+ ('', 'Expecting value', 0),
+ ('[', "Expecting value or ']'", 1),
+ ('[42', "Expecting ',' delimiter", 3),
+ ('[42,', 'Expecting value', 4),
+ ('["', 'Unterminated string starting at', 1),
+ ('["spam', 'Unterminated string starting at', 1),
+ ('["spam"', "Expecting ',' delimiter", 7),
+ ('["spam",', 'Expecting value', 8),
+ ('{', 'Expecting property name enclosed in double quotes', 1),
+ ('{"', 'Unterminated string starting at', 1),
+ ('{"spam', 'Unterminated string starting at', 1),
+ ('{"spam"', "Expecting ':' delimiter", 7),
+ ('{"spam":', 'Expecting value', 8),
+ ('{"spam":42', "Expecting ',' delimiter", 10),
+ ('{"spam":42,', 'Expecting property name enclosed in double quotes',
+ 11),
+ ('"', 'Unterminated string starting at', 0),
+ ('"spam', 'Unterminated string starting at', 0),
+ ('[,', "Expecting value", 1),
+ ]
+ for data, msg, idx in test_cases:
+ try:
+ json.loads(data)
+ except json.JSONDecodeError:
+ e = sys.exc_info()[1]
+ self.assertEqual(
+ e.msg[:len(msg)],
+ msg,
+ "%r doesn't start with %r for %r" % (e.msg, msg, data))
+ self.assertEqual(
+ e.pos, idx,
+ "pos %r != %r for %r" % (e.pos, idx, data))
+ except Exception:
+ e = sys.exc_info()[1]
+ self.fail("Unexpected exception raised %r %s" % (e, e))
+ else:
+ self.fail("Unexpected success parsing '%r'" % (data,))
diff --git a/app/lib/simplejson/tests/test_float.py b/app/lib/simplejson/tests/test_float.py
new file mode 100644
index 0000000..e382ec2
--- /dev/null
+++ b/app/lib/simplejson/tests/test_float.py
@@ -0,0 +1,35 @@
+import math
+from unittest import TestCase
+from simplejson.compat import long_type, text_type
+import simplejson as json
+from simplejson.decoder import NaN, PosInf, NegInf
+
+class TestFloat(TestCase):
+ def test_degenerates_allow(self):
+ for inf in (PosInf, NegInf):
+ self.assertEqual(json.loads(json.dumps(inf)), inf)
+ # Python 2.5 doesn't have math.isnan
+ nan = json.loads(json.dumps(NaN))
+ self.assertTrue((0 + nan) != nan)
+
+ def test_degenerates_ignore(self):
+ for f in (PosInf, NegInf, NaN):
+ self.assertEqual(json.loads(json.dumps(f, ignore_nan=True)), None)
+
+ def test_degenerates_deny(self):
+ for f in (PosInf, NegInf, NaN):
+ self.assertRaises(ValueError, json.dumps, f, allow_nan=False)
+
+ def test_floats(self):
+ for num in [1617161771.7650001, math.pi, math.pi**100,
+ math.pi**-100, 3.1]:
+ self.assertEqual(float(json.dumps(num)), num)
+ self.assertEqual(json.loads(json.dumps(num)), num)
+ self.assertEqual(json.loads(text_type(json.dumps(num))), num)
+
+ def test_ints(self):
+ for num in [1, long_type(1), 1<<32, 1<<64]:
+ self.assertEqual(json.dumps(num), str(num))
+ self.assertEqual(int(json.dumps(num)), num)
+ self.assertEqual(json.loads(json.dumps(num)), num)
+ self.assertEqual(json.loads(text_type(json.dumps(num))), num)
diff --git a/app/lib/simplejson/tests/test_for_json.py b/app/lib/simplejson/tests/test_for_json.py
new file mode 100644
index 0000000..b791b88
--- /dev/null
+++ b/app/lib/simplejson/tests/test_for_json.py
@@ -0,0 +1,97 @@
+import unittest
+import simplejson as json
+
+
+class ForJson(object):
+ def for_json(self):
+ return {'for_json': 1}
+
+
+class NestedForJson(object):
+ def for_json(self):
+ return {'nested': ForJson()}
+
+
+class ForJsonList(object):
+ def for_json(self):
+ return ['list']
+
+
+class DictForJson(dict):
+ def for_json(self):
+ return {'alpha': 1}
+
+
+class ListForJson(list):
+ def for_json(self):
+ return ['list']
+
+
+class TestForJson(unittest.TestCase):
+ def assertRoundTrip(self, obj, other, for_json=True):
+ if for_json is None:
+ # None will use the default
+ s = json.dumps(obj)
+ else:
+ s = json.dumps(obj, for_json=for_json)
+ self.assertEqual(
+ json.loads(s),
+ other)
+
+ def test_for_json_encodes_stand_alone_object(self):
+ self.assertRoundTrip(
+ ForJson(),
+ ForJson().for_json())
+
+ def test_for_json_encodes_object_nested_in_dict(self):
+ self.assertRoundTrip(
+ {'hooray': ForJson()},
+ {'hooray': ForJson().for_json()})
+
+ def test_for_json_encodes_object_nested_in_list_within_dict(self):
+ self.assertRoundTrip(
+ {'list': [0, ForJson(), 2, 3]},
+ {'list': [0, ForJson().for_json(), 2, 3]})
+
+ def test_for_json_encodes_object_nested_within_object(self):
+ self.assertRoundTrip(
+ NestedForJson(),
+ {'nested': {'for_json': 1}})
+
+ def test_for_json_encodes_list(self):
+ self.assertRoundTrip(
+ ForJsonList(),
+ ForJsonList().for_json())
+
+ def test_for_json_encodes_list_within_object(self):
+ self.assertRoundTrip(
+ {'nested': ForJsonList()},
+ {'nested': ForJsonList().for_json()})
+
+ def test_for_json_encodes_dict_subclass(self):
+ self.assertRoundTrip(
+ DictForJson(a=1),
+ DictForJson(a=1).for_json())
+
+ def test_for_json_encodes_list_subclass(self):
+ self.assertRoundTrip(
+ ListForJson(['l']),
+ ListForJson(['l']).for_json())
+
+ def test_for_json_ignored_if_not_true_with_dict_subclass(self):
+ for for_json in (None, False):
+ self.assertRoundTrip(
+ DictForJson(a=1),
+ {'a': 1},
+ for_json=for_json)
+
+ def test_for_json_ignored_if_not_true_with_list_subclass(self):
+ for for_json in (None, False):
+ self.assertRoundTrip(
+ ListForJson(['l']),
+ ['l'],
+ for_json=for_json)
+
+ def test_raises_typeerror_if_for_json_not_true_with_object(self):
+ self.assertRaises(TypeError, json.dumps, ForJson())
+ self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False)
diff --git a/app/lib/simplejson/tests/test_indent.py b/app/lib/simplejson/tests/test_indent.py
new file mode 100644
index 0000000..cea25a5
--- /dev/null
+++ b/app/lib/simplejson/tests/test_indent.py
@@ -0,0 +1,86 @@
+from unittest import TestCase
+import textwrap
+
+import simplejson as json
+from simplejson.compat import StringIO
+
+class TestIndent(TestCase):
+ def test_indent(self):
+ h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh',
+ 'i-vhbjkhnth',
+ {'nifty': 87}, {'field': 'yes', 'morefield': False} ]
+
+ expect = textwrap.dedent("""\
+ [
+ \t[
+ \t\t"blorpie"
+ \t],
+ \t[
+ \t\t"whoops"
+ \t],
+ \t[],
+ \t"d-shtaeou",
+ \t"d-nthiouh",
+ \t"i-vhbjkhnth",
+ \t{
+ \t\t"nifty": 87
+ \t},
+ \t{
+ \t\t"field": "yes",
+ \t\t"morefield": false
+ \t}
+ ]""")
+
+
+ d1 = json.dumps(h)
+ d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
+ d3 = json.dumps(h, indent=' ', sort_keys=True, separators=(',', ': '))
+ d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
+
+ h1 = json.loads(d1)
+ h2 = json.loads(d2)
+ h3 = json.loads(d3)
+ h4 = json.loads(d4)
+
+ self.assertEqual(h1, h)
+ self.assertEqual(h2, h)
+ self.assertEqual(h3, h)
+ self.assertEqual(h4, h)
+ self.assertEqual(d3, expect.replace('\t', ' '))
+ self.assertEqual(d4, expect.replace('\t', ' '))
+ # NOTE: Python 2.4 textwrap.dedent converts tabs to spaces, so the
+ # following assertion would fail there. Python 2.4 is not a supported
+ # platform in simplejson 2.1.0+.
+ self.assertEqual(d2, expect)
+
+ def test_indent0(self):
+ h = {3: 1}
+ def check(indent, expected):
+ d1 = json.dumps(h, indent=indent)
+ self.assertEqual(d1, expected)
+
+ sio = StringIO()
+ json.dump(h, sio, indent=indent)
+ self.assertEqual(sio.getvalue(), expected)
+
+ # indent=0 should emit newlines
+ check(0, '{\n"3": 1\n}')
+ # indent=None is more compact
+ check(None, '{"3": 1}')
+
+ def test_separators(self):
+ lst = [1,2,3,4]
+ expect = '[\n1,\n2,\n3,\n4\n]'
+ expect_spaces = '[\n1, \n2, \n3, \n4\n]'
+ # Ensure that separators still works
+ self.assertEqual(
+ expect_spaces,
+ json.dumps(lst, indent=0, separators=(', ', ': ')))
+ # Force the new defaults
+ self.assertEqual(
+ expect,
+ json.dumps(lst, indent=0, separators=(',', ': ')))
+ # Added in 2.1.4
+ self.assertEqual(
+ expect,
+ json.dumps(lst, indent=0))
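+
+# A usage sketch of the indent behavior tested above: indent accepts an
+# int, a string such as '\t', or None; 0 still emits newlines, while None
+# gives the compact form.
+if __name__ == '__main__':
+    print(json.dumps({'a': 1}, indent=2))     # {\n  "a": 1\n}
+    print(json.dumps({'a': 1}, indent=None))  # {"a": 1}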
diff --git a/app/lib/simplejson/tests/test_item_sort_key.py b/app/lib/simplejson/tests/test_item_sort_key.py
new file mode 100644
index 0000000..b05bfc8
--- /dev/null
+++ b/app/lib/simplejson/tests/test_item_sort_key.py
@@ -0,0 +1,20 @@
+from unittest import TestCase
+
+import simplejson as json
+from operator import itemgetter
+
+class TestItemSortKey(TestCase):
+ def test_simple_first(self):
+ a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
+ self.assertEqual(
+ '{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
+ json.dumps(a, item_sort_key=json.simple_first))
+
+ def test_case(self):
+ a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
+ self.assertEqual(
+ '{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
+ json.dumps(a, item_sort_key=itemgetter(0)))
+ self.assertEqual(
+ '{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
+ json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))
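+
+# A usage sketch for item_sort_key (availability is an assumption: the
+# option and json.simple_first ship with simplejson 2.5+). simple_first
+# orders scalar values ahead of containers:
+#   >>> json.dumps({'b': [1], 'a': 1}, item_sort_key=json.simple_first)
+#   '{"a": 1, "b": [1]}'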
diff --git a/app/lib/simplejson/tests/test_iterable.py b/app/lib/simplejson/tests/test_iterable.py
new file mode 100644
index 0000000..35d3e75
--- /dev/null
+++ b/app/lib/simplejson/tests/test_iterable.py
@@ -0,0 +1,31 @@
+import unittest
+from simplejson.compat import StringIO
+
+import simplejson as json
+
+def iter_dumps(obj, **kw):
+ return ''.join(json.JSONEncoder(**kw).iterencode(obj))
+
+def sio_dump(obj, **kw):
+ sio = StringIO()
+ json.dump(obj, sio, **kw)
+ return sio.getvalue()
+
+class TestIterable(unittest.TestCase):
+ def test_iterable(self):
+ for l in ([], [1], [1, 2], [1, 2, 3]):
+ for opts in [{}, {'indent': 2}]:
+ for dumps in (json.dumps, iter_dumps, sio_dump):
+ expect = dumps(l, **opts)
+ default_expect = dumps(sum(l), **opts)
+ # Default is False
+ self.assertRaises(TypeError, dumps, iter(l), **opts)
+ self.assertRaises(TypeError, dumps, iter(l), iterable_as_array=False, **opts)
+ self.assertEqual(expect, dumps(iter(l), iterable_as_array=True, **opts))
+ # Ensure that the "default" gets called
+ self.assertEqual(default_expect, dumps(iter(l), default=sum, **opts))
+ self.assertEqual(default_expect, dumps(iter(l), iterable_as_array=False, default=sum, **opts))
+ # Ensure that the "default" does not get called
+ self.assertEqual(
+ expect,
+ dumps(iter(l), iterable_as_array=True, default=sum, **opts))
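+
+# A usage sketch of iterable_as_array (assumed available in simplejson
+# 3.8+): one-shot iterables such as generators stream out as JSON arrays.
+if __name__ == '__main__':
+    gen = (n * n for n in range(4))
+    print(json.dumps(gen, iterable_as_array=True))  # [0, 1, 4, 9]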
diff --git a/app/lib/simplejson/tests/test_namedtuple.py b/app/lib/simplejson/tests/test_namedtuple.py
new file mode 100644
index 0000000..4387894
--- /dev/null
+++ b/app/lib/simplejson/tests/test_namedtuple.py
@@ -0,0 +1,122 @@
+from __future__ import absolute_import
+import unittest
+import simplejson as json
+from simplejson.compat import StringIO
+
+try:
+ from collections import namedtuple
+except ImportError:
+ class Value(tuple):
+ def __new__(cls, *args):
+ return tuple.__new__(cls, args)
+
+ def _asdict(self):
+ return {'value': self[0]}
+ class Point(tuple):
+ def __new__(cls, *args):
+ return tuple.__new__(cls, args)
+
+ def _asdict(self):
+ return {'x': self[0], 'y': self[1]}
+else:
+ Value = namedtuple('Value', ['value'])
+ Point = namedtuple('Point', ['x', 'y'])
+
+class DuckValue(object):
+ def __init__(self, *args):
+ self.value = Value(*args)
+
+ def _asdict(self):
+ return self.value._asdict()
+
+class DuckPoint(object):
+ def __init__(self, *args):
+ self.point = Point(*args)
+
+ def _asdict(self):
+ return self.point._asdict()
+
+class DeadDuck(object):
+ _asdict = None
+
+class DeadDict(dict):
+ _asdict = None
+
+CONSTRUCTORS = [
+ lambda v: v,
+ lambda v: [v],
+ lambda v: [{'key': v}],
+]
+
+class TestNamedTuple(unittest.TestCase):
+ def test_namedtuple_dumps(self):
+ for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
+ d = v._asdict()
+ self.assertEqual(d, json.loads(json.dumps(v)))
+ self.assertEqual(
+ d,
+ json.loads(json.dumps(v, namedtuple_as_object=True)))
+ self.assertEqual(d, json.loads(json.dumps(v, tuple_as_array=False)))
+ self.assertEqual(
+ d,
+ json.loads(json.dumps(v, namedtuple_as_object=True,
+ tuple_as_array=False)))
+
+ def test_namedtuple_dumps_false(self):
+ for v in [Value(1), Point(1, 2)]:
+ l = list(v)
+ self.assertEqual(
+ l,
+ json.loads(json.dumps(v, namedtuple_as_object=False)))
+ self.assertRaises(TypeError, json.dumps, v,
+ tuple_as_array=False, namedtuple_as_object=False)
+
+ def test_namedtuple_dump(self):
+ for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
+ d = v._asdict()
+ sio = StringIO()
+ json.dump(v, sio)
+ self.assertEqual(d, json.loads(sio.getvalue()))
+ sio = StringIO()
+ json.dump(v, sio, namedtuple_as_object=True)
+ self.assertEqual(
+ d,
+ json.loads(sio.getvalue()))
+ sio = StringIO()
+ json.dump(v, sio, tuple_as_array=False)
+ self.assertEqual(d, json.loads(sio.getvalue()))
+ sio = StringIO()
+ json.dump(v, sio, namedtuple_as_object=True,
+ tuple_as_array=False)
+ self.assertEqual(
+ d,
+ json.loads(sio.getvalue()))
+
+ def test_namedtuple_dump_false(self):
+ for v in [Value(1), Point(1, 2)]:
+ l = list(v)
+ sio = StringIO()
+ json.dump(v, sio, namedtuple_as_object=False)
+ self.assertEqual(
+ l,
+ json.loads(sio.getvalue()))
+ self.assertRaises(TypeError, json.dump, v, StringIO(),
+ tuple_as_array=False, namedtuple_as_object=False)
+
+ def test_asdict_not_callable_dump(self):
+ for f in CONSTRUCTORS:
+ self.assertRaises(TypeError,
+ json.dump, f(DeadDuck()), StringIO(), namedtuple_as_object=True)
+ sio = StringIO()
+ json.dump(f(DeadDict()), sio, namedtuple_as_object=True)
+ self.assertEqual(
+ json.dumps(f({})),
+ sio.getvalue())
+
+ def test_asdict_not_callable_dumps(self):
+ for f in CONSTRUCTORS:
+ self.assertRaises(TypeError,
+ json.dumps, f(DeadDuck()), namedtuple_as_object=True)
+ self.assertEqual(
+ json.dumps(f({})),
+ json.dumps(f(DeadDict()), namedtuple_as_object=True))
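+
+# A usage sketch of the default behavior tested above: any object with a
+# callable _asdict() encodes as a JSON object unless namedtuple_as_object
+# is disabled.
+if __name__ == '__main__':
+    p = Point(1, 2)
+    print(json.dumps(p))                              # {"x": 1, "y": 2}
+    print(json.dumps(p, namedtuple_as_object=False))  # [1, 2]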
diff --git a/app/lib/simplejson/tests/test_pass1.py b/app/lib/simplejson/tests/test_pass1.py
new file mode 100644
index 0000000..f0b5b10
--- /dev/null
+++ b/app/lib/simplejson/tests/test_pass1.py
@@ -0,0 +1,71 @@
+from unittest import TestCase
+
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass1.json
+JSON = r'''
+[
+ "JSON Test Pattern pass1",
+ {"object with 1 member":["array with 1 element"]},
+ {},
+ [],
+ -42,
+ true,
+ false,
+ null,
+ {
+ "integer": 1234567890,
+ "real": -9876.543210,
+ "e": 0.123456789e-12,
+ "E": 1.234567890E+34,
+ "": 23456789012E66,
+ "zero": 0,
+ "one": 1,
+ "space": " ",
+ "quote": "\"",
+ "backslash": "\\",
+ "controls": "\b\f\n\r\t",
+ "slash": "/ & \/",
+ "alpha": "abcdefghijklmnopqrstuvwyz",
+ "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
+ "digit": "0123456789",
+ "special": "`1~!@#$%^&*()_+-={':[,]}|;.>?",
+ "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
+ "true": true,
+ "false": false,
+ "null": null,
+ "array":[ ],
+ "object":{ },
+ "address": "50 St. James Street",
+ "url": "http://www.JSON.org/",
+ "comment": "// /* */": " ",
+ " s p a c e d " :[1,2 , 3
+
+,
+
+4 , 5 , 6 ,7 ],"compact": [1,2,3,4,5,6,7],
+ "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
+ "quotes": "" \u0022 %22 0x22 034 "",
+ "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
+: "A key can be any string"
+ },
+ 0.5 ,98.6
+,
+99.44
+,
+
+1066,
+1e1,
+0.1e1,
+1e-1,
+1e00,2e+00,2e-00
+,"rosebud"]
+'''
+
+class TestPass1(TestCase):
+ def test_parse(self):
+ # test in/out equivalence and parsing
+ res = json.loads(JSON)
+ out = json.dumps(res)
+ self.assertEqual(res, json.loads(out))
diff --git a/app/lib/simplejson/tests/test_pass2.py b/app/lib/simplejson/tests/test_pass2.py
new file mode 100644
index 0000000..5d812b3
--- /dev/null
+++ b/app/lib/simplejson/tests/test_pass2.py
@@ -0,0 +1,14 @@
+from unittest import TestCase
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass2.json
+JSON = r'''
+[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
+'''
+
+class TestPass2(TestCase):
+ def test_parse(self):
+ # test in/out equivalence and parsing
+ res = json.loads(JSON)
+ out = json.dumps(res)
+ self.assertEqual(res, json.loads(out))
diff --git a/app/lib/simplejson/tests/test_pass3.py b/app/lib/simplejson/tests/test_pass3.py
new file mode 100644
index 0000000..821d60b
--- /dev/null
+++ b/app/lib/simplejson/tests/test_pass3.py
@@ -0,0 +1,20 @@
+from unittest import TestCase
+
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass3.json
+JSON = r'''
+{
+ "JSON Test Pattern pass3": {
+ "The outermost value": "must be an object or array.",
+ "In this test": "It is an object."
+ }
+}
+'''
+
+class TestPass3(TestCase):
+ def test_parse(self):
+ # test in/out equivalence and parsing
+ res = json.loads(JSON)
+ out = json.dumps(res)
+ self.assertEqual(res, json.loads(out))
diff --git a/app/lib/simplejson/tests/test_raw_json.py b/app/lib/simplejson/tests/test_raw_json.py
new file mode 100644
index 0000000..1dfcc2c
--- /dev/null
+++ b/app/lib/simplejson/tests/test_raw_json.py
@@ -0,0 +1,47 @@
+import unittest
+import simplejson as json
+
+dct1 = {
+ 'key1': 'value1'
+}
+
+dct2 = {
+ 'key2': 'value2',
+ 'd1': dct1
+}
+
+dct3 = {
+ 'key2': 'value2',
+ 'd1': json.dumps(dct1)
+}
+
+dct4 = {
+ 'key2': 'value2',
+ 'd1': json.RawJSON(json.dumps(dct1))
+}
+
+
+class TestRawJson(unittest.TestCase):
+
+ def test_normal_str(self):
+ self.assertNotEqual(json.dumps(dct2), json.dumps(dct3))
+
+ def test_raw_json_str(self):
+ self.assertEqual(json.dumps(dct2), json.dumps(dct4))
+ self.assertEqual(dct2, json.loads(json.dumps(dct4)))
+
+ def test_list(self):
+ self.assertEqual(
+ json.dumps([dct2]),
+ json.dumps([json.RawJSON(json.dumps(dct2))]))
+ self.assertEqual(
+ [dct2],
+ json.loads(json.dumps([json.RawJSON(json.dumps(dct2))])))
+
+ def test_direct(self):
+ self.assertEqual(
+ json.dumps(dct2),
+ json.dumps(json.RawJSON(json.dumps(dct2))))
+ self.assertEqual(
+ dct2,
+ json.loads(json.dumps(json.RawJSON(json.dumps(dct2)))))
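+
+# A usage sketch for RawJSON (assumed available in simplejson 3.9+): it
+# splices an already-serialized fragment into the output verbatim instead
+# of re-encoding it.
+if __name__ == '__main__':
+    cached = json.dumps({'big': 'payload'})
+    print(json.dumps({'cached': json.RawJSON(cached)}))
+    # {"cached": {"big": "payload"}}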
diff --git a/app/lib/simplejson/tests/test_recursion.py b/app/lib/simplejson/tests/test_recursion.py
new file mode 100644
index 0000000..662eb66
--- /dev/null
+++ b/app/lib/simplejson/tests/test_recursion.py
@@ -0,0 +1,67 @@
+from unittest import TestCase
+
+import simplejson as json
+
+class JSONTestObject:
+ pass
+
+
+class RecursiveJSONEncoder(json.JSONEncoder):
+ recurse = False
+ def default(self, o):
+ if o is JSONTestObject:
+ if self.recurse:
+ return [JSONTestObject]
+ else:
+ return 'JSONTestObject'
+ return json.JSONEncoder.default(self, o)
+
+
+class TestRecursion(TestCase):
+ def test_listrecursion(self):
+ x = []
+ x.append(x)
+ try:
+ json.dumps(x)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on list recursion")
+ x = []
+ y = [x]
+ x.append(y)
+ try:
+ json.dumps(x)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on alternating list recursion")
+ y = []
+ x = [y, y]
+ # ensure that the marker is cleared
+ json.dumps(x)
+
+ def test_dictrecursion(self):
+ x = {}
+ x["test"] = x
+ try:
+ json.dumps(x)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on dict recursion")
+ x = {}
+ y = {"a": x, "b": x}
+ # ensure that the marker is cleared
+ json.dumps(y)
+
+ def test_defaultrecursion(self):
+ enc = RecursiveJSONEncoder()
+ self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"')
+ enc.recurse = True
+ try:
+ enc.encode(JSONTestObject)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on default recursion")
diff --git a/app/lib/simplejson/tests/test_scanstring.py b/app/lib/simplejson/tests/test_scanstring.py
new file mode 100644
index 0000000..3d98f0d
--- /dev/null
+++ b/app/lib/simplejson/tests/test_scanstring.py
@@ -0,0 +1,194 @@
+import sys
+from unittest import TestCase
+
+import simplejson as json
+import simplejson.decoder
+from simplejson.compat import b, PY3
+
+class TestScanString(TestCase):
+ # The bytes type is intentionally not used in most of these tests
+ # under Python 3 because the decoder immediately coerces to str before
+ # calling scanstring. In Python 2 we are testing the code paths
+ # for both unicode and str.
+ #
+ # The reason this is done is because Python 3 would require
+ # entirely different code paths for parsing bytes and str.
+ #
+ def test_py_scanstring(self):
+ self._test_scanstring(simplejson.decoder.py_scanstring)
+
+ def test_c_scanstring(self):
+ if not simplejson.decoder.c_scanstring:
+ return
+ self._test_scanstring(simplejson.decoder.c_scanstring)
+
+ def _test_scanstring(self, scanstring):
+ if sys.maxunicode == 65535:
+ self.assertEqual(
+ scanstring(u'"z\U0001d120x"', 1, None, True),
+ (u'z\U0001d120x', 6))
+ else:
+ self.assertEqual(
+ scanstring(u'"z\U0001d120x"', 1, None, True),
+ (u'z\U0001d120x', 5))
+
+ self.assertEqual(
+ scanstring('"\\u007b"', 1, None, True),
+ (u'{', 8))
+
+ self.assertEqual(
+ scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
+ (u'A JSON payload should be an object or array, not a string.', 60))
+
+ self.assertEqual(
+ scanstring('["Unclosed array"', 2, None, True),
+ (u'Unclosed array', 17))
+
+ self.assertEqual(
+ scanstring('["extra comma",]', 2, None, True),
+ (u'extra comma', 14))
+
+ self.assertEqual(
+ scanstring('["double extra comma",,]', 2, None, True),
+ (u'double extra comma', 21))
+
+ self.assertEqual(
+ scanstring('["Comma after the close"],', 2, None, True),
+ (u'Comma after the close', 24))
+
+ self.assertEqual(
+ scanstring('["Extra close"]]', 2, None, True),
+ (u'Extra close', 14))
+
+ self.assertEqual(
+ scanstring('{"Extra comma": true,}', 2, None, True),
+ (u'Extra comma', 14))
+
+ self.assertEqual(
+ scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
+ (u'Extra value after close', 26))
+
+ self.assertEqual(
+ scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
+ (u'Illegal expression', 21))
+
+ self.assertEqual(
+ scanstring('{"Illegal invocation": alert()}', 2, None, True),
+ (u'Illegal invocation', 21))
+
+ self.assertEqual(
+ scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
+ (u'Numbers cannot have leading zeroes', 37))
+
+ self.assertEqual(
+ scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
+ (u'Numbers cannot be hex', 24))
+
+ self.assertEqual(
+ scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
+ (u'Too deep', 30))
+
+ self.assertEqual(
+ scanstring('{"Missing colon" null}', 2, None, True),
+ (u'Missing colon', 16))
+
+ self.assertEqual(
+ scanstring('{"Double colon":: null}', 2, None, True),
+ (u'Double colon', 15))
+
+ self.assertEqual(
+ scanstring('{"Comma instead of colon", null}', 2, None, True),
+ (u'Comma instead of colon', 25))
+
+ self.assertEqual(
+ scanstring('["Colon instead of comma": false]', 2, None, True),
+ (u'Colon instead of comma', 25))
+
+ self.assertEqual(
+ scanstring('["Bad value", truth]', 2, None, True),
+ (u'Bad value', 12))
+
+ for c in map(chr, range(0x00, 0x1f)):
+ self.assertEqual(
+ scanstring(c + '"', 0, None, False),
+ (c, 2))
+ self.assertRaises(
+ ValueError,
+ scanstring, c + '"', 0, None, True)
+
+ self.assertRaises(ValueError, scanstring, '', 0, None, True)
+ self.assertRaises(ValueError, scanstring, 'a', 0, None, True)
+ self.assertRaises(ValueError, scanstring, '\\', 0, None, True)
+ self.assertRaises(ValueError, scanstring, '\\u', 0, None, True)
+ self.assertRaises(ValueError, scanstring, '\\u0', 0, None, True)
+ self.assertRaises(ValueError, scanstring, '\\u01', 0, None, True)
+ self.assertRaises(ValueError, scanstring, '\\u012', 0, None, True)
+ self.assertRaises(ValueError, scanstring, '\\u0123', 0, None, True)
+ if sys.maxunicode > 65535:
+ self.assertRaises(ValueError,
+ scanstring, '\\ud834\\u"', 0, None, True)
+ self.assertRaises(ValueError,
+ scanstring, '\\ud834\\x0123"', 0, None, True)
+
+ def test_issue3623(self):
+ self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1,
+ "xxx")
+ self.assertRaises(UnicodeDecodeError,
+ json.encoder.encode_basestring_ascii, b("xx\xff"))
+
+ def test_overflow(self):
+ # Python 2.5 does not have maxsize, Python 3 does not have maxint
+ maxsize = getattr(sys, 'maxsize', getattr(sys, 'maxint', None))
+ assert maxsize is not None
+ self.assertRaises(OverflowError, json.decoder.scanstring, "xxx",
+ maxsize + 1)
+
+ def test_surrogates(self):
+ scanstring = json.decoder.scanstring
+
+ def assertScan(given, expect, test_utf8=True):
+ givens = [given]
+ if not PY3 and test_utf8:
+ givens.append(given.encode('utf8'))
+ for given in givens:
+ (res, count) = scanstring(given, 1, None, True)
+ self.assertEqual(len(given), count)
+ self.assertEqual(res, expect)
+
+ assertScan(
+ u'"z\\ud834\\u0079x"',
+ u'z\ud834yx')
+ assertScan(
+ u'"z\\ud834\\udd20x"',
+ u'z\U0001d120x')
+ assertScan(
+ u'"z\\ud834\\ud834\\udd20x"',
+ u'z\ud834\U0001d120x')
+ assertScan(
+ u'"z\\ud834x"',
+ u'z\ud834x')
+ assertScan(
+ u'"z\\udd20x"',
+ u'z\udd20x')
+ assertScan(
+ u'"z\ud834x"',
+ u'z\ud834x')
+ # It may look strange to join strings together, but Python is drunk.
+ # https://gist.github.com/etrepum/5538443
+ assertScan(
+ u'"z\\ud834\udd20x12345"',
+ u''.join([u'z\ud834', u'\udd20x12345']))
+ assertScan(
+ u'"z\ud834\\udd20x"',
+ u''.join([u'z\ud834', u'\udd20x']))
+ # these have different behavior given UTF8 input, because the surrogate
+ # pair may be joined (in maxunicode > 65535 builds)
+ assertScan(
+ u''.join([u'"z\ud834', u'\udd20x"']),
+ u''.join([u'z\ud834', u'\udd20x']),
+ test_utf8=False)
+
+ self.assertRaises(ValueError,
+ scanstring, u'"z\\ud83x"', 1, None, True)
+ self.assertRaises(ValueError,
+ scanstring, u'"z\\ud834\\udd2x"', 1, None, True)
diff --git a/app/lib/simplejson/tests/test_separators.py b/app/lib/simplejson/tests/test_separators.py
new file mode 100644
index 0000000..91b4d4f
--- /dev/null
+++ b/app/lib/simplejson/tests/test_separators.py
@@ -0,0 +1,42 @@
+import textwrap
+from unittest import TestCase
+
+import simplejson as json
+
+
+class TestSeparators(TestCase):
+ def test_separators(self):
+ h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
+ {'nifty': 87}, {'field': 'yes', 'morefield': False} ]
+
+ expect = textwrap.dedent("""\
+ [
+ [
+ "blorpie"
+ ] ,
+ [
+ "whoops"
+ ] ,
+ [] ,
+ "d-shtaeou" ,
+ "d-nthiouh" ,
+ "i-vhbjkhnth" ,
+ {
+ "nifty" : 87
+ } ,
+ {
+ "field" : "yes" ,
+ "morefield" : false
+ }
+ ]""")
+
+
+ d1 = json.dumps(h)
+ d2 = json.dumps(h, indent=' ', sort_keys=True, separators=(' ,', ' : '))
+
+ h1 = json.loads(d1)
+ h2 = json.loads(d2)
+
+ self.assertEqual(h1, h)
+ self.assertEqual(h2, h)
+ self.assertEqual(d2, expect)
diff --git a/app/lib/simplejson/tests/test_speedups.py b/app/lib/simplejson/tests/test_speedups.py
new file mode 100644
index 0000000..0a2b63b
--- /dev/null
+++ b/app/lib/simplejson/tests/test_speedups.py
@@ -0,0 +1,39 @@
+import sys
+import unittest
+from unittest import TestCase
+
+from simplejson import encoder, scanner
+
+
+def has_speedups():
+ return encoder.c_make_encoder is not None
+
+
+def skip_if_speedups_missing(func):
+ def wrapper(*args, **kwargs):
+ if not has_speedups():
+ if hasattr(unittest, 'SkipTest'):
+ raise unittest.SkipTest("C Extension not available")
+ else:
+ sys.stdout.write("C Extension not available")
+ return
+ return func(*args, **kwargs)
+
+ return wrapper
+
+
+class TestDecode(TestCase):
+ @skip_if_speedups_missing
+ def test_make_scanner(self):
+ self.assertRaises(AttributeError, scanner.c_make_scanner, 1)
+
+ @skip_if_speedups_missing
+ def test_make_encoder(self):
+ self.assertRaises(
+ TypeError,
+ encoder.c_make_encoder,
+ None,
+ ("\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7"
+ "\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75"),
+ None
+ )
diff --git a/app/lib/simplejson/tests/test_str_subclass.py b/app/lib/simplejson/tests/test_str_subclass.py
new file mode 100644
index 0000000..771eb67
--- /dev/null
+++ b/app/lib/simplejson/tests/test_str_subclass.py
@@ -0,0 +1,16 @@
+from unittest import TestCase
+
+import simplejson
+from simplejson.compat import text_type, u
+
+# Tests for issue demonstrated in https://github.com/simplejson/simplejson/issues/144
+class WonkyTextSubclass(text_type):
+ def __getslice__(self, start, end):
+ return self.__class__('not what you wanted!')
+
+class TestStrSubclass(TestCase):
+ def test_dump_load(self):
+ for s in ['', '"hello"', 'text', u('\u005c')]:
+ self.assertEqual(
+ s,
+ simplejson.loads(simplejson.dumps(WonkyTextSubclass(s))))
diff --git a/app/lib/simplejson/tests/test_subclass.py b/app/lib/simplejson/tests/test_subclass.py
new file mode 100644
index 0000000..2bae3b6
--- /dev/null
+++ b/app/lib/simplejson/tests/test_subclass.py
@@ -0,0 +1,37 @@
+from unittest import TestCase
+import simplejson as json
+
+from decimal import Decimal
+
+class AlternateInt(int):
+ def __repr__(self):
+ return 'invalid json'
+ __str__ = __repr__
+
+
+class AlternateFloat(float):
+ def __repr__(self):
+ return 'invalid json'
+ __str__ = __repr__
+
+
+# class AlternateDecimal(Decimal):
+# def __repr__(self):
+# return 'invalid json'
+
+
+class TestSubclass(TestCase):
+ def test_int(self):
+ self.assertEqual(json.dumps(AlternateInt(1)), '1')
+ self.assertEqual(json.dumps(AlternateInt(-1)), '-1')
+ self.assertEqual(json.loads(json.dumps({AlternateInt(1): 1})), {'1': 1})
+
+ def test_float(self):
+ self.assertEqual(json.dumps(AlternateFloat(1.0)), '1.0')
+ self.assertEqual(json.dumps(AlternateFloat(-1.0)), '-1.0')
+ self.assertEqual(json.loads(json.dumps({AlternateFloat(1.0): 1})), {'1.0': 1})
+
+ # NOTE: Decimal subclasses are not supported as-is
+ # def test_decimal(self):
+ # self.assertEqual(json.dumps(AlternateDecimal('1.0')), '1.0')
+ # self.assertEqual(json.dumps(AlternateDecimal('-1.0')), '-1.0')
diff --git a/app/lib/simplejson/tests/test_tool.py b/app/lib/simplejson/tests/test_tool.py
new file mode 100644
index 0000000..ac2a14c
--- /dev/null
+++ b/app/lib/simplejson/tests/test_tool.py
@@ -0,0 +1,97 @@
+from __future__ import with_statement
+import os
+import sys
+import textwrap
+import unittest
+import subprocess
+import tempfile
+try:
+ # Python 3.x
+ from test.support import strip_python_stderr
+except ImportError:
+ # Python 2.6+
+ try:
+ from test.test_support import strip_python_stderr
+ except ImportError:
+ # Python 2.5
+ import re
+ def strip_python_stderr(stderr):
+ return re.sub(
+ r"\[\d+ refs\]\r?\n?$".encode(),
+ "".encode(),
+ stderr).strip()
+
+class TestTool(unittest.TestCase):
+ data = """
+
+ [["blorpie"],[ "whoops" ] , [
+ ],\t"d-shtaeou",\r"d-nthiouh",
+ "i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field"
+ :"yes"} ]
+ """
+
+ expect = textwrap.dedent("""\
+ [
+ [
+ "blorpie"
+ ],
+ [
+ "whoops"
+ ],
+ [],
+ "d-shtaeou",
+ "d-nthiouh",
+ "i-vhbjkhnth",
+ {
+ "nifty": 87
+ },
+ {
+ "field": "yes",
+ "morefield": false
+ }
+ ]
+ """)
+
+ def runTool(self, args=None, data=None):
+ argv = [sys.executable, '-m', 'simplejson.tool']
+ if args:
+ argv.extend(args)
+ proc = subprocess.Popen(argv,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ out, err = proc.communicate(data)
+ self.assertEqual(strip_python_stderr(err), ''.encode())
+ self.assertEqual(proc.returncode, 0)
+ return out
+
+ def test_stdin_stdout(self):
+ self.assertEqual(
+ self.runTool(data=self.data.encode()),
+ self.expect.encode())
+
+ def test_infile_stdout(self):
+ with tempfile.NamedTemporaryFile() as infile:
+ infile.write(self.data.encode())
+ infile.flush()
+ self.assertEqual(
+ self.runTool(args=[infile.name]),
+ self.expect.encode())
+
+ def test_infile_outfile(self):
+ with tempfile.NamedTemporaryFile() as infile:
+ infile.write(self.data.encode())
+ infile.flush()
+ # outfile will get overwritten by tool, so the delete
+ # may not work on some platforms. Do it manually.
+ outfile = tempfile.NamedTemporaryFile()
+ try:
+ self.assertEqual(
+ self.runTool(args=[infile.name, outfile.name]),
+ ''.encode())
+ with open(outfile.name, 'rb') as f:
+ self.assertEqual(f.read(), self.expect.encode())
+ finally:
+ outfile.close()
+ if os.path.exists(outfile.name):
+ os.unlink(outfile.name)
diff --git a/app/lib/simplejson/tests/test_tuple.py b/app/lib/simplejson/tests/test_tuple.py
new file mode 100644
index 0000000..4ad7b0e
--- /dev/null
+++ b/app/lib/simplejson/tests/test_tuple.py
@@ -0,0 +1,47 @@
+import unittest
+
+from simplejson.compat import StringIO
+import simplejson as json
+
+class TestTuples(unittest.TestCase):
+ def test_tuple_array_dumps(self):
+ t = (1, 2, 3)
+ expect = json.dumps(list(t))
+ # Default is True
+ self.assertEqual(expect, json.dumps(t))
+ self.assertEqual(expect, json.dumps(t, tuple_as_array=True))
+ self.assertRaises(TypeError, json.dumps, t, tuple_as_array=False)
+ # Ensure that the "default" does not get called
+ self.assertEqual(expect, json.dumps(t, default=repr))
+ self.assertEqual(expect, json.dumps(t, tuple_as_array=True,
+ default=repr))
+ # Ensure that the "default" gets called
+ self.assertEqual(
+ json.dumps(repr(t)),
+ json.dumps(t, tuple_as_array=False, default=repr))
+
+ def test_tuple_array_dump(self):
+ t = (1, 2, 3)
+ expect = json.dumps(list(t))
+ # Default is True
+ sio = StringIO()
+ json.dump(t, sio)
+ self.assertEqual(expect, sio.getvalue())
+ sio = StringIO()
+ json.dump(t, sio, tuple_as_array=True)
+ self.assertEqual(expect, sio.getvalue())
+ self.assertRaises(TypeError, json.dump, t, StringIO(),
+ tuple_as_array=False)
+ # Ensure that the "default" does not get called
+ sio = StringIO()
+ json.dump(t, sio, default=repr)
+ self.assertEqual(expect, sio.getvalue())
+ sio = StringIO()
+ json.dump(t, sio, tuple_as_array=True, default=repr)
+ self.assertEqual(expect, sio.getvalue())
+ # Ensure that the "default" gets called
+ sio = StringIO()
+ json.dump(t, sio, tuple_as_array=False, default=repr)
+ self.assertEqual(
+ json.dumps(repr(t)),
+ sio.getvalue())
diff --git a/app/lib/simplejson/tests/test_unicode.py b/app/lib/simplejson/tests/test_unicode.py
new file mode 100644
index 0000000..3b37f65
--- /dev/null
+++ b/app/lib/simplejson/tests/test_unicode.py
@@ -0,0 +1,153 @@
+import sys
+import codecs
+from unittest import TestCase
+
+import simplejson as json
+from simplejson.compat import unichr, text_type, b, u, BytesIO
+
+class TestUnicode(TestCase):
+ def test_encoding1(self):
+ encoder = json.JSONEncoder(encoding='utf-8')
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ s = u.encode('utf-8')
+ ju = encoder.encode(u)
+ js = encoder.encode(s)
+ self.assertEqual(ju, js)
+
+ def test_encoding2(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ s = u.encode('utf-8')
+ ju = json.dumps(u, encoding='utf-8')
+ js = json.dumps(s, encoding='utf-8')
+ self.assertEqual(ju, js)
+
+ def test_encoding3(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps(u)
+ self.assertEqual(j, '"\\u03b1\\u03a9"')
+
+ def test_encoding4(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps([u])
+ self.assertEqual(j, '["\\u03b1\\u03a9"]')
+
+ def test_encoding5(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps(u, ensure_ascii=False)
+ self.assertEqual(j, u'"' + u + u'"')
+
+ def test_encoding6(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps([u], ensure_ascii=False)
+ self.assertEqual(j, u'["' + u + u'"]')
+
+ def test_big_unicode_encode(self):
+ u = u'\U0001d120'
+ self.assertEqual(json.dumps(u), '"\\ud834\\udd20"')
+ self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
+
+ def test_big_unicode_decode(self):
+ u = u'z\U0001d120x'
+ self.assertEqual(json.loads('"' + u + '"'), u)
+ self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u)
+
+ def test_unicode_decode(self):
+ for i in range(0, 0xd7ff):
+ u = unichr(i)
+ #s = '"\\u{0:04x}"'.format(i)
+ s = '"\\u%04x"' % (i,)
+ self.assertEqual(json.loads(s), u)
+
+ def test_object_pairs_hook_with_unicode(self):
+ s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
+ p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
+ (u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
+ self.assertEqual(json.loads(s), eval(s))
+ self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
+ od = json.loads(s, object_pairs_hook=json.OrderedDict)
+ self.assertEqual(od, json.OrderedDict(p))
+ self.assertEqual(type(od), json.OrderedDict)
+ # the object_pairs_hook takes priority over the object_hook
+ self.assertEqual(json.loads(s,
+ object_pairs_hook=json.OrderedDict,
+ object_hook=lambda x: None),
+ json.OrderedDict(p))
+
+
+ def test_default_encoding(self):
+ self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
+ {'a': u'\xe9'})
+
+ def test_unicode_preservation(self):
+ self.assertEqual(type(json.loads(u'""')), text_type)
+ self.assertEqual(type(json.loads(u'"a"')), text_type)
+ self.assertEqual(type(json.loads(u'["a"]')[0]), text_type)
+
+ def test_ensure_ascii_false_returns_unicode(self):
+ # http://code.google.com/p/simplejson/issues/detail?id=48
+ self.assertEqual(type(json.dumps([], ensure_ascii=False)), text_type)
+ self.assertEqual(type(json.dumps(0, ensure_ascii=False)), text_type)
+ self.assertEqual(type(json.dumps({}, ensure_ascii=False)), text_type)
+ self.assertEqual(type(json.dumps("", ensure_ascii=False)), text_type)
+
+ def test_ensure_ascii_false_bytestring_encoding(self):
+ # http://code.google.com/p/simplejson/issues/detail?id=48
+ doc1 = {u'quux': b('Arr\xc3\xaat sur images')}
+ doc2 = {u'quux': u('Arr\xeat sur images')}
+ doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
+ doc_unicode = u'{"quux": "Arr\xeat sur images"}'
+ self.assertEqual(json.dumps(doc1), doc_ascii)
+ self.assertEqual(json.dumps(doc2), doc_ascii)
+ self.assertEqual(json.dumps(doc1, ensure_ascii=False), doc_unicode)
+ self.assertEqual(json.dumps(doc2, ensure_ascii=False), doc_unicode)
+
+ def test_ensure_ascii_linebreak_encoding(self):
+ # http://timelessrepo.com/json-isnt-a-javascript-subset
+ s1 = u'\u2029\u2028'
+ s2 = s1.encode('utf8')
+ expect = '"\\u2029\\u2028"'
+ self.assertEqual(json.dumps(s1), expect)
+ self.assertEqual(json.dumps(s2), expect)
+ self.assertEqual(json.dumps(s1, ensure_ascii=False), expect)
+ self.assertEqual(json.dumps(s2, ensure_ascii=False), expect)
+
+ def test_invalid_escape_sequences(self):
+ # incomplete escape sequence
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1234')
+ # invalid escape sequence
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123x"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12x4"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1x34"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ux234"')
+ if sys.maxunicode > 65535:
+ # invalid escape sequence for low surrogate
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000x"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00x0"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0x00"')
+ self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\ux000"')
+
+ def test_ensure_ascii_still_works(self):
+ # in the ascii range, ensure that everything is the same
+ for c in map(unichr, range(0, 127)):
+ self.assertEqual(
+ json.dumps(c, ensure_ascii=False),
+ json.dumps(c))
+ snowman = u'\N{SNOWMAN}'
+ self.assertEqual(
+ json.dumps(snowman, ensure_ascii=False),
+ u'"' + snowman + u'"')
+
+ def test_strip_bom(self):
+ content = u"\u3053\u3093\u306b\u3061\u308f"
+ json_doc = codecs.BOM_UTF8 + b(json.dumps(content))
+ self.assertEqual(json.load(BytesIO(json_doc)), content)
+ for doc in json_doc, json_doc.decode('utf8'):
+ self.assertEqual(json.loads(doc), content)
diff --git a/app/lib/simplejson/tool.py b/app/lib/simplejson/tool.py
new file mode 100644
index 0000000..062e8e2
--- /dev/null
+++ b/app/lib/simplejson/tool.py
@@ -0,0 +1,42 @@
+r"""Command-line tool to validate and pretty-print JSON
+
+Usage::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+
+"""
+from __future__ import with_statement
+import sys
+import simplejson as json
+
+def main():
+ if len(sys.argv) == 1:
+ infile = sys.stdin
+ outfile = sys.stdout
+ elif len(sys.argv) == 2:
+ infile = open(sys.argv[1], 'r')
+ outfile = sys.stdout
+ elif len(sys.argv) == 3:
+ infile = open(sys.argv[1], 'r')
+ outfile = open(sys.argv[2], 'w')
+ else:
+ raise SystemExit(sys.argv[0] + " [infile [outfile]]")
+ with infile:
+ try:
+ obj = json.load(infile,
+ object_pairs_hook=json.OrderedDict,
+ use_decimal=True)
+ except ValueError:
+ raise SystemExit(sys.exc_info()[1])
+ with outfile:
+ json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True)
+ outfile.write('\n')
+
+
+if __name__ == '__main__':
+ main()
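+
+# A usage sketch beyond the stdin/stdout example in the docstring (file
+# names are illustrative): validate in.json and pretty-print to out.json.
+#   $ python -m simplejson.tool in.json out.json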
diff --git a/app/lib/six-1.10.0.dist-info/DESCRIPTION.rst b/app/lib/six-1.10.0.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000..2e2607d
--- /dev/null
+++ b/app/lib/six-1.10.0.dist-info/DESCRIPTION.rst
@@ -0,0 +1,18 @@
+Six is a Python 2 and 3 compatibility library. It provides utility functions
+for smoothing over the differences between the Python versions with the goal of
+writing Python code that is compatible on both Python versions. See the
+documentation for more information on what is provided.
+
+Six supports every Python version since 2.6. It is contained in only one Python
+file, so it can be easily copied into your project. (The copyright and license
+notice must be retained.)
+
+Online documentation is at https://pythonhosted.org/six/.
+
+Bugs can be reported to https://bitbucket.org/gutworth/six. The code can also
+be found there.
+
+For questions about six or porting in general, email the python-porting mailing
+list: https://mail.python.org/mailman/listinfo/python-porting
+
+
diff --git a/app/lib/six-1.10.0.dist-info/METADATA b/app/lib/six-1.10.0.dist-info/METADATA
new file mode 100644
index 0000000..4fc3d07
--- /dev/null
+++ b/app/lib/six-1.10.0.dist-info/METADATA
@@ -0,0 +1,34 @@
+Metadata-Version: 2.0
+Name: six
+Version: 1.10.0
+Summary: Python 2 and 3 compatibility utilities
+Home-page: http://pypi.python.org/pypi/six/
+Author: Benjamin Peterson
+Author-email: benjamin@python.org
+License: MIT
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+
+Six is a Python 2 and 3 compatibility library. It provides utility functions
+for smoothing over the differences between the Python versions with the goal of
+writing Python code that is compatible on both Python versions. See the
+documentation for more information on what is provided.
+
+Six supports every Python version since 2.6. It is contained in only one Python
+file, so it can be easily copied into your project. (The copyright and license
+notice must be retained.)
+
+Online documentation is at https://pythonhosted.org/six/.
+
+Bugs can be reported to https://bitbucket.org/gutworth/six. The code can also
+be found there.
+
+For questions about six or porting in general, email the python-porting mailing
+list: https://mail.python.org/mailman/listinfo/python-porting
+
+
diff --git a/app/lib/six-1.10.0.dist-info/RECORD b/app/lib/six-1.10.0.dist-info/RECORD
new file mode 100644
index 0000000..6350c4e
--- /dev/null
+++ b/app/lib/six-1.10.0.dist-info/RECORD
@@ -0,0 +1,8 @@
+six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
+six-1.10.0.dist-info/DESCRIPTION.rst,sha256=QWBtSTT2zzabwJv1NQbTfClSX13m-Qc6tqU4TRL1RLs,774
+six-1.10.0.dist-info/METADATA,sha256=5HceJsUnHof2IRamlCKO2MwNjve1eSP4rLzVQDfwpCQ,1283
+six-1.10.0.dist-info/RECORD,,
+six-1.10.0.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110
+six-1.10.0.dist-info/metadata.json,sha256=jtOeeTBubYDChl_5Ql5ZPlKoHgg6rdqRIjOz1e5Ek2U,658
+six-1.10.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4
+__pycache__/six.cpython-34.pyc,,
diff --git a/app/lib/six-1.10.0.dist-info/WHEEL b/app/lib/six-1.10.0.dist-info/WHEEL
new file mode 100644
index 0000000..0de529b
--- /dev/null
+++ b/app/lib/six-1.10.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.26.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/app/lib/six-1.10.0.dist-info/metadata.json b/app/lib/six-1.10.0.dist-info/metadata.json
new file mode 100644
index 0000000..21f9f6c
--- /dev/null
+++ b/app/lib/six-1.10.0.dist-info/metadata.json
@@ -0,0 +1 @@
+{"generator": "bdist_wheel (0.26.0)", "summary": "Python 2 and 3 compatibility utilities", "classifiers": ["Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Libraries", "Topic :: Utilities"], "extensions": {"python.details": {"project_urls": {"Home": "http://pypi.python.org/pypi/six/"}, "contacts": [{"email": "benjamin@python.org", "name": "Benjamin Peterson", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "MIT", "metadata_version": "2.0", "name": "six", "version": "1.10.0"}
\ No newline at end of file
diff --git a/app/lib/six-1.10.0.dist-info/top_level.txt b/app/lib/six-1.10.0.dist-info/top_level.txt
new file mode 100644
index 0000000..ffe2fce
--- /dev/null
+++ b/app/lib/six-1.10.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+six
diff --git a/app/lib/six.py b/app/lib/six.py
new file mode 100644
index 0000000..190c023
--- /dev/null
+++ b/app/lib/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson "
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
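+
+# Once six registers _importer on sys.meta_path (done near the bottom of
+# this module), renamed stdlib modules import uniformly on 2 and 3, e.g.:
+#   from six.moves import configparser, http_client
+#   from six.moves.urllib.parse import urlparse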
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
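+# Sketch of the intended use inside a unittest.TestCase (illustrative):
+#
+#     class MyTest(unittest.TestCase):
+#         def test_items(self):
+#             # dispatches to assertItemsEqual on PY2, assertCountEqual on PY3
+#             six.assertCountEqual(self, [1, 2], [2, 1])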
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ if from_value is None:
+ raise value
+ raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ raise value from from_value
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
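+# Hedged usage sketch for the exception helpers above (illustrative):
+#
+#     try:
+#         {}["missing"]
+#     except KeyError as exc:
+#         # chains with "raise ... from ..." on Python 3; plain raise earlier
+#         six.raise_from(ValueError("bad config key"), exc)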
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
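+# Illustrative sketch (not part of upstream six): the temporary metaclass
+# above means the real metaclass sees the real bases exactly once.
+#
+#     class Meta(type):
+#         pass
+#
+#     class MyClass(six.with_metaclass(Meta, object)):
+#         pass
+#
+#     assert type(MyClass) is Meta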
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
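+# Equivalent decorator form, as a hedged example (illustrative only):
+#
+#     @six.add_metaclass(Meta)
+#     class MyOtherClass(object):
+#         __slots__ = ("x",)  # slot descriptors are re-created by the
+#                             # metaclass, hence the pops above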
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
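+# Illustrative example (not upstream code): define __str__ once returning
+# text, and Python 2 gets a matching __unicode__/__str__ pair for free.
+#
+#     @six.python_2_unicode_compatible
+#     class Greeting(object):
+#         def __str__(self):
+#             return u"caf\u00e9"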
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/app/lib/sqlalchemy/__init__.py b/app/lib/sqlalchemy/__init__.py
new file mode 100644
index 0000000..a2116e0
--- /dev/null
+++ b/app/lib/sqlalchemy/__init__.py
@@ -0,0 +1,146 @@
+# sqlalchemy/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
+from .sql import (
+ alias,
+ all_,
+ and_,
+ any_,
+ asc,
+ between,
+ bindparam,
+ case,
+ cast,
+ collate,
+ column,
+ delete,
+ desc,
+ distinct,
+ except_,
+ except_all,
+ exists,
+ extract,
+ false,
+ func,
+ funcfilter,
+ insert,
+ intersect,
+ intersect_all,
+ join,
+ lateral,
+ literal,
+ literal_column,
+ modifier,
+ not_,
+ null,
+ or_,
+ outerjoin,
+ outparam,
+ over,
+ select,
+ subquery,
+ table,
+ tablesample,
+ text,
+ true,
+ tuple_,
+ type_coerce,
+ union,
+ union_all,
+ update,
+ within_group,
+ )
+
+from .types import (
+ ARRAY,
+ BIGINT,
+ BINARY,
+ BLOB,
+ BOOLEAN,
+ BigInteger,
+ Binary,
+ Boolean,
+ CHAR,
+ CLOB,
+ DATE,
+ DATETIME,
+ DECIMAL,
+ Date,
+ DateTime,
+ Enum,
+ FLOAT,
+ Float,
+ INT,
+ INTEGER,
+ Integer,
+ Interval,
+ JSON,
+ LargeBinary,
+ NCHAR,
+ NVARCHAR,
+ NUMERIC,
+ Numeric,
+ PickleType,
+ REAL,
+ SMALLINT,
+ SmallInteger,
+ String,
+ TEXT,
+ TIME,
+ TIMESTAMP,
+ Text,
+ Time,
+ TypeDecorator,
+ Unicode,
+ UnicodeText,
+ VARBINARY,
+ VARCHAR,
+ )
+
+
+from .schema import (
+ CheckConstraint,
+ Column,
+ ColumnDefault,
+ Constraint,
+ DefaultClause,
+ FetchedValue,
+ ForeignKey,
+ ForeignKeyConstraint,
+ Index,
+ MetaData,
+ PassiveDefault,
+ PrimaryKeyConstraint,
+ Sequence,
+ Table,
+ ThreadLocalMetaData,
+ UniqueConstraint,
+ DDL,
+ BLANK_SCHEMA
+)
+
+
+from .inspection import inspect
+from .engine import create_engine, engine_from_config
+
+__version__ = '1.1.9'
+
+
+def __go(lcls):
+ global __all__
+
+ from . import events
+ from . import util as _sa_util
+
+ import inspect as _inspect
+
+ __all__ = sorted(name for name, obj in lcls.items()
+ if not (name.startswith('_') or _inspect.ismodule(obj)))
+
+ _sa_util.dependencies.resolve_all("sqlalchemy")
+__go(locals())
diff --git a/app/lib/sqlalchemy/connectors/__init__.py b/app/lib/sqlalchemy/connectors/__init__.py
new file mode 100644
index 0000000..5cf06d8
--- /dev/null
+++ b/app/lib/sqlalchemy/connectors/__init__.py
@@ -0,0 +1,10 @@
+# connectors/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
+class Connector(object):
+ pass
diff --git a/app/lib/sqlalchemy/connectors/mxodbc.py b/app/lib/sqlalchemy/connectors/mxodbc.py
new file mode 100644
index 0000000..32e7e18
--- /dev/null
+++ b/app/lib/sqlalchemy/connectors/mxodbc.py
@@ -0,0 +1,150 @@
+# connectors/mxodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+Provide a SQLAlchemy connector for the eGenix mxODBC commercial
+Python adapter for ODBC. This is not a free product, but eGenix
+provides SQLAlchemy with a license for use in continuous integration
+testing.
+
+This has been tested for use with mxODBC 3.1.2 on SQL Server 2005
+and 2008, using the SQL Server Native driver. However, it is
+possible for this to be used on other database platforms.
+
+For more info on mxODBC, see http://www.egenix.com/
+
+"""
+
+import sys
+import re
+import warnings
+
+from . import Connector
+
+
+class MxODBCConnector(Connector):
+ driver = 'mxodbc'
+
+ supports_sane_multi_rowcount = False
+ supports_unicode_statements = True
+ supports_unicode_binds = True
+
+ supports_native_decimal = True
+
+ @classmethod
+ def dbapi(cls):
+ # this classmethod will normally be replaced by an instance
+ # attribute of the same name, so this is normally only called once.
+ cls._load_mx_exceptions()
+ platform = sys.platform
+ if platform == 'win32':
+ from mx.ODBC import Windows as module
+ # this can be the string "linux2", and possibly others
+ elif 'linux' in platform:
+ from mx.ODBC import unixODBC as module
+ elif platform == 'darwin':
+ from mx.ODBC import iODBC as module
+ else:
+ raise ImportError("Unrecognized platform for mxODBC import")
+ return module
+
+ @classmethod
+ def _load_mx_exceptions(cls):
+ """ Import mxODBC exception classes into the module namespace,
+ as if they had been imported normally. This is done here
+ to avoid requiring all SQLAlchemy users to install mxODBC.
+ """
+ global InterfaceError, ProgrammingError
+ from mx.ODBC import InterfaceError
+ from mx.ODBC import ProgrammingError
+
+ def on_connect(self):
+ def connect(conn):
+ conn.stringformat = self.dbapi.MIXED_STRINGFORMAT
+ conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT
+ conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT
+ conn.errorhandler = self._error_handler()
+ return connect
+
+ def _error_handler(self):
+ """ Return a handler that adjusts mxODBC's raised Warnings to
+ emit Python standard warnings.
+ """
+ from mx.ODBC.Error import Warning as MxOdbcWarning
+
+ def error_handler(connection, cursor, errorclass, errorvalue):
+ if issubclass(errorclass, MxOdbcWarning):
+ errorclass.__bases__ = (Warning,)
+ warnings.warn(message=str(errorvalue),
+ category=errorclass,
+ stacklevel=2)
+ else:
+ raise errorclass(errorvalue)
+ return error_handler
+
+ def create_connect_args(self, url):
+ """ Return a tuple of *args,**kwargs for creating a connection.
+
+ The mxODBC 3.x connection constructor looks like this:
+
+ connect(dsn, user='', password='',
+ clear_auto_commit=1, errorhandler=None)
+
+ This method translates the values in the provided uri
+ into args and kwargs needed to instantiate an mxODBC Connection.
+
+ The arg 'errorhandler' is not used by SQLAlchemy and will
+ not be populated.
+
+ """
+ opts = url.translate_connect_args(username='user')
+ opts.update(url.query)
+ args = opts.pop('host')
+ opts.pop('port', None)
+ opts.pop('database', None)
+ return (args,), opts
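+    # Hedged illustration of the translation above (made-up values): a URL
+    # such as mssql+mxodbc://user:pass@mydsn yields
+    #   args   = ("mydsn",)
+    #   kwargs = {"user": "user", "password": "pass"}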
+
+ def is_disconnect(self, e, connection, cursor):
+ # TODO: eGenix recommends checking connection.closed here
+ # Does that detect dropped connections ?
+ if isinstance(e, self.dbapi.ProgrammingError):
+ return "connection already closed" in str(e)
+ elif isinstance(e, self.dbapi.Error):
+ return '[08S01]' in str(e)
+ else:
+ return False
+
+ def _get_server_version_info(self, connection):
+ # eGenix suggests using conn.dbms_version instead
+ # of what we're doing here
+ dbapi_con = connection.connection
+ version = []
+ r = re.compile(r'[.\-]')
+ # 18 == pyodbc.SQL_DBMS_VER
+ for n in r.split(dbapi_con.getinfo(18)[1]):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
+
+ def _get_direct(self, context):
+ if context:
+ native_odbc_execute = context.execution_options.\
+ get('native_odbc_execute', 'auto')
+            # default to direct=True in all cases; it is more generally
+            # compatible, especially with SQL Server
+ return False if native_odbc_execute is True else True
+ else:
+ return True
+
+ def do_executemany(self, cursor, statement, parameters, context=None):
+ cursor.executemany(
+ statement, parameters, direct=self._get_direct(context))
+
+ def do_execute(self, cursor, statement, parameters, context=None):
+ cursor.execute(statement, parameters, direct=self._get_direct(context))
diff --git a/app/lib/sqlalchemy/connectors/pyodbc.py b/app/lib/sqlalchemy/connectors/pyodbc.py
new file mode 100644
index 0000000..ee8445d
--- /dev/null
+++ b/app/lib/sqlalchemy/connectors/pyodbc.py
@@ -0,0 +1,196 @@
+# connectors/pyodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from . import Connector
+from .. import util
+
+
+import sys
+import re
+
+
+class PyODBCConnector(Connector):
+ driver = 'pyodbc'
+
+ supports_sane_multi_rowcount = False
+
+ if util.py2k:
+ # PyODBC unicode is broken on UCS-4 builds
+ supports_unicode = sys.maxunicode == 65535
+ supports_unicode_statements = supports_unicode
+
+ supports_native_decimal = True
+ default_paramstyle = 'named'
+
+ # for non-DSN connections, this *may* be used to
+ # hold the desired driver name
+ pyodbc_driver_name = None
+
+ # will be set to True after initialize()
+ # if the freetds.so is detected
+ freetds = False
+
+ # will be set to the string version of
+ # the FreeTDS driver if freetds is detected
+ freetds_driver_version = None
+
+ # will be set to True after initialize()
+ # if the libessqlsrv.so is detected
+ easysoft = False
+
+ def __init__(self, supports_unicode_binds=None, **kw):
+ super(PyODBCConnector, self).__init__(**kw)
+ self._user_supports_unicode_binds = supports_unicode_binds
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('pyodbc')
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ opts.update(url.query)
+
+ keys = opts
+
+ query = url.query
+
+ connect_args = {}
+ for param in ('ansi', 'unicode_results', 'autocommit'):
+ if param in keys:
+ connect_args[param] = util.asbool(keys.pop(param))
+
+ if 'odbc_connect' in keys:
+ connectors = [util.unquote_plus(keys.pop('odbc_connect'))]
+ else:
+ def check_quote(token):
+ if ";" in str(token):
+ token = "'%s'" % token
+ return token
+
+ keys = dict(
+ (k, check_quote(v)) for k, v in keys.items()
+ )
+
+ dsn_connection = 'dsn' in keys or \
+ ('host' in keys and 'database' not in keys)
+ if dsn_connection:
+ connectors = ['dsn=%s' % (keys.pop('host', '') or
+ keys.pop('dsn', ''))]
+ else:
+ port = ''
+ if 'port' in keys and 'port' not in query:
+ port = ',%d' % int(keys.pop('port'))
+
+ connectors = []
+ driver = keys.pop('driver', self.pyodbc_driver_name)
+ if driver is None:
+ util.warn(
+ "No driver name specified; "
+ "this is expected by PyODBC when using "
+ "DSN-less connections")
+ else:
+ connectors.append("DRIVER={%s}" % driver)
+
+ connectors.extend(
+ [
+ 'Server=%s%s' % (keys.pop('host', ''), port),
+ 'Database=%s' % keys.pop('database', '')
+ ])
+
+ user = keys.pop("user", None)
+ if user:
+ connectors.append("UID=%s" % user)
+ connectors.append("PWD=%s" % keys.pop('password', ''))
+ else:
+ connectors.append("Trusted_Connection=Yes")
+
+ # if set to 'Yes', the ODBC layer will try to automagically
+ # convert textual data from your database encoding to your
+ # client encoding. This should obviously be set to 'No' if
+ # you query a cp1253 encoded database from a latin1 client...
+ if 'odbc_autotranslate' in keys:
+ connectors.append("AutoTranslate=%s" %
+ keys.pop("odbc_autotranslate"))
+
+ connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()])
+
+ return [[";".join(connectors)], connect_args]
+
+ def is_disconnect(self, e, connection, cursor):
+ if isinstance(e, self.dbapi.ProgrammingError):
+ return "The cursor's connection has been closed." in str(e) or \
+ 'Attempt to use a closed connection.' in str(e)
+ elif isinstance(e, self.dbapi.Error):
+ return '[08S01]' in str(e)
+ else:
+ return False
+
+ def initialize(self, connection):
+ # determine FreeTDS first. can't issue SQL easily
+ # without getting unicode_statements/binds set up.
+
+ pyodbc = self.dbapi
+
+ dbapi_con = connection.connection
+
+ _sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME)
+ self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name
+ ))
+ self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name
+ ))
+
+ if self.freetds:
+ self.freetds_driver_version = dbapi_con.getinfo(
+ pyodbc.SQL_DRIVER_VER)
+
+ self.supports_unicode_statements = (
+ not util.py2k or
+ (not self.freetds and not self.easysoft)
+ )
+
+ if self._user_supports_unicode_binds is not None:
+ self.supports_unicode_binds = self._user_supports_unicode_binds
+ elif util.py2k:
+ self.supports_unicode_binds = (
+ not self.freetds or self.freetds_driver_version >= '0.91'
+ ) and not self.easysoft
+ else:
+ self.supports_unicode_binds = True
+
+ # run other initialization which asks for user name, etc.
+ super(PyODBCConnector, self).initialize(connection)
+
+ def _dbapi_version(self):
+ if not self.dbapi:
+ return ()
+ return self._parse_dbapi_version(self.dbapi.version)
+
+ def _parse_dbapi_version(self, vers):
+ m = re.match(
+ r'(?:py.*-)?([\d\.]+)(?:-(\w+))?',
+ vers
+ )
+ if not m:
+ return ()
+ vers = tuple([int(x) for x in m.group(1).split(".")])
+ if m.group(2):
+ vers += (m.group(2),)
+ return vers
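+    # Illustrative expected results for the regex above (assumed inputs):
+    #   "4.0.0"           -> (4, 0, 0)
+    #   "py3-3.0.1-beta4" -> (3, 0, 1, 'beta4')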
+
+ def _get_server_version_info(self, connection):
+ # NOTE: this function is not reliable, particularly when
+ # freetds is in use. Implement database-specific server version
+ # queries.
+ dbapi_con = connection.connection
+ version = []
+ r = re.compile(r'[.\-]')
+ for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
diff --git a/app/lib/sqlalchemy/connectors/zxJDBC.py b/app/lib/sqlalchemy/connectors/zxJDBC.py
new file mode 100644
index 0000000..8a5b749
--- /dev/null
+++ b/app/lib/sqlalchemy/connectors/zxJDBC.py
@@ -0,0 +1,60 @@
+# connectors/zxJDBC.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import sys
+from . import Connector
+
+
+class ZxJDBCConnector(Connector):
+ driver = 'zxjdbc'
+
+ supports_sane_rowcount = False
+ supports_sane_multi_rowcount = False
+
+ supports_unicode_binds = True
+ supports_unicode_statements = sys.version > '2.5.0+'
+ description_encoding = None
+ default_paramstyle = 'qmark'
+
+ jdbc_db_name = None
+ jdbc_driver_name = None
+
+ @classmethod
+ def dbapi(cls):
+ from com.ziclix.python.sql import zxJDBC
+ return zxJDBC
+
+ def _driver_kwargs(self):
+ """Return kw arg dict to be sent to connect()."""
+ return {}
+
+ def _create_jdbc_url(self, url):
+ """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
+ return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
+ url.port is not None
+ and ':%s' % url.port or '',
+ url.database)
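+    # Hedged example of the URL translation above (values are made up):
+    #   postgresql+zxjdbc://scott:tiger@myhost:5432/mydb
+    #   -> "jdbc:postgresql://myhost:5432/mydb"
+    # (with jdbc_db_name = "postgresql" supplied by the concrete dialect)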
+
+ def create_connect_args(self, url):
+ opts = self._driver_kwargs()
+ opts.update(url.query)
+ return [
+ [self._create_jdbc_url(url),
+ url.username, url.password,
+ self.jdbc_driver_name],
+ opts]
+
+ def is_disconnect(self, e, connection, cursor):
+ if not isinstance(e, self.dbapi.ProgrammingError):
+ return False
+ e = str(e)
+ return 'connection is closed' in e or 'cursor is closed' in e
+
+ def _get_server_version_info(self, connection):
+ # use connection.connection.dbversion, and parse appropriately
+ # to get a tuple
+ raise NotImplementedError()
diff --git a/app/lib/sqlalchemy/cprocessors.so b/app/lib/sqlalchemy/cprocessors.so
new file mode 100755
index 0000000..f64d60c
Binary files /dev/null and b/app/lib/sqlalchemy/cprocessors.so differ
diff --git a/app/lib/sqlalchemy/cresultproxy.so b/app/lib/sqlalchemy/cresultproxy.so
new file mode 100755
index 0000000..c00c034
Binary files /dev/null and b/app/lib/sqlalchemy/cresultproxy.so differ
diff --git a/app/lib/sqlalchemy/cutils.so b/app/lib/sqlalchemy/cutils.so
new file mode 100755
index 0000000..81c6b44
Binary files /dev/null and b/app/lib/sqlalchemy/cutils.so differ
diff --git a/app/lib/sqlalchemy/databases/__init__.py b/app/lib/sqlalchemy/databases/__init__.py
new file mode 100644
index 0000000..3fb659d
--- /dev/null
+++ b/app/lib/sqlalchemy/databases/__init__.py
@@ -0,0 +1,30 @@
+# databases/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Include imports from the sqlalchemy.dialects package for backwards
+compatibility with pre 0.6 versions.
+
+"""
+from ..dialects.sqlite import base as sqlite
+from ..dialects.postgresql import base as postgresql
+postgres = postgresql
+from ..dialects.mysql import base as mysql
+from ..dialects.oracle import base as oracle
+from ..dialects.firebird import base as firebird
+from ..dialects.mssql import base as mssql
+from ..dialects.sybase import base as sybase
+
+
+__all__ = (
+ 'firebird',
+ 'mssql',
+ 'mysql',
+ 'postgresql',
+ 'sqlite',
+ 'oracle',
+ 'sybase',
+)
diff --git a/app/lib/sqlalchemy/dialects/__init__.py b/app/lib/sqlalchemy/dialects/__init__.py
new file mode 100644
index 0000000..44051f0
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/__init__.py
@@ -0,0 +1,56 @@
+# dialects/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+__all__ = (
+ 'firebird',
+ 'mssql',
+ 'mysql',
+ 'oracle',
+ 'postgresql',
+ 'sqlite',
+ 'sybase',
+)
+
+from .. import util
+
+_translates = {'postgres': 'postgresql'}
+
+def _auto_fn(name):
+ """default dialect importer.
+
+ plugs into the :class:`.PluginLoader`
+ as a first-hit system.
+
+ """
+ if "." in name:
+ dialect, driver = name.split(".")
+ else:
+ dialect = name
+ driver = "base"
+
+ if dialect in _translates:
+ translated = _translates[dialect]
+ util.warn_deprecated(
+ "The '%s' dialect name has been "
+ "renamed to '%s'" % (dialect, translated)
+ )
+ dialect = translated
+ try:
+ module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects
+ except ImportError:
+ return None
+
+ module = getattr(module, dialect)
+ if hasattr(module, driver):
+ module = getattr(module, driver)
+ return lambda: module.dialect
+ else:
+ return None
+
+registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn)
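+# Illustrative use of the loader above (assumed call sites):
+#   registry.load("sqlite")              -> the default sqlite dialect class
+#   registry.load("postgresql.psycopg2") -> the psycopg2 driver's dialect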
+
+plugins = util.PluginLoader("sqlalchemy.plugins")
\ No newline at end of file
diff --git a/app/lib/sqlalchemy/dialects/firebird/__init__.py b/app/lib/sqlalchemy/dialects/firebird/__init__.py
new file mode 100644
index 0000000..8dd9d11
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/firebird/__init__.py
@@ -0,0 +1,21 @@
+# firebird/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from sqlalchemy.dialects.firebird import base, kinterbasdb, fdb
+
+base.dialect = fdb.dialect
+
+from sqlalchemy.dialects.firebird.base import \
+    SMALLINT, BIGINT, FLOAT, DATE, TIME, \
+    TEXT, NUMERIC, TIMESTAMP, VARCHAR, CHAR, BLOB,\
+    dialect
+
+__all__ = (
+    'SMALLINT', 'BIGINT', 'FLOAT', 'DATE', 'TIME',
+    'TEXT', 'NUMERIC', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
+    'dialect'
+)
diff --git a/app/lib/sqlalchemy/dialects/firebird/base.py b/app/lib/sqlalchemy/dialects/firebird/base.py
new file mode 100644
index 0000000..7d4aca5
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/firebird/base.py
@@ -0,0 +1,741 @@
+# firebird/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+
+.. dialect:: firebird
+ :name: Firebird
+
+Firebird Dialects
+-----------------
+
+Firebird offers two distinct dialects_ (not to be confused with a
+SQLAlchemy ``Dialect``):
+
+dialect 1
+ This is the old syntax and behaviour, inherited from Interbase pre-6.0.
+
+dialect 3
+ This is the newer and supported syntax, introduced in Interbase 6.0.
+
+The SQLAlchemy Firebird dialect detects these versions and
+adjusts its representation of SQL accordingly. However,
+support for dialect 1 is not well tested and probably has
+incompatibilities.
+
+Locking Behavior
+----------------
+
+Firebird locks tables aggressively. For this reason, a DROP TABLE may
+hang until other transactions are released. SQLAlchemy does its best
+to release transactions as quickly as possible. The most common cause
+of hanging transactions is a non-fully consumed result set, i.e.::
+
+ result = engine.execute("select * from table")
+ row = result.fetchone()
+ return
+
+Where above, the ``ResultProxy`` has not been fully consumed. The
+connection will be returned to the pool and the transactional state
+rolled back once the Python garbage collector reclaims the objects
+which hold onto the connection, which often occurs asynchronously.
+The above use case can be alleviated by calling ``first()`` on the
+``ResultProxy`` which will fetch the first row and immediately close
+all remaining cursor/connection resources.
+
+RETURNING support
+-----------------
+
+Firebird 2.0 supports returning a result set from inserts, and 2.1
+extends that to deletes and updates. This is generically exposed by
+the SQLAlchemy ``returning()`` method, such as::
+
+ # INSERT..RETURNING
+ result = table.insert().returning(table.c.col1, table.c.col2).\
+ values(name='foo')
+ print result.fetchall()
+
+ # UPDATE..RETURNING
+ raises = empl.update().returning(empl.c.id, empl.c.salary).\
+ where(empl.c.sales>100).\
+ values(dict(salary=empl.c.salary * 1.1))
+ print raises.fetchall()
+
+
+.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
+
+"""
+
+import datetime
+
+from sqlalchemy import schema as sa_schema
+from sqlalchemy import exc, types as sqltypes, sql, util
+from sqlalchemy.sql import expression
+from sqlalchemy.engine import base, default, reflection
+from sqlalchemy.sql import compiler
+from sqlalchemy.sql.elements import quoted_name
+
+from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC,
+ SMALLINT, TEXT, TIME, TIMESTAMP, Integer)
+
+
+RESERVED_WORDS = set([
+ "active", "add", "admin", "after", "all", "alter", "and", "any", "as",
+ "asc", "ascending", "at", "auto", "avg", "before", "begin", "between",
+ "bigint", "bit_length", "blob", "both", "by", "case", "cast", "char",
+ "character", "character_length", "char_length", "check", "close",
+ "collate", "column", "commit", "committed", "computed", "conditional",
+ "connect", "constraint", "containing", "count", "create", "cross",
+ "cstring", "current", "current_connection", "current_date",
+ "current_role", "current_time", "current_timestamp",
+ "current_transaction", "current_user", "cursor", "database", "date",
+ "day", "dec", "decimal", "declare", "default", "delete", "desc",
+ "descending", "disconnect", "distinct", "do", "domain", "double",
+ "drop", "else", "end", "entry_point", "escape", "exception",
+ "execute", "exists", "exit", "external", "extract", "fetch", "file",
+ "filter", "float", "for", "foreign", "from", "full", "function",
+ "gdscode", "generator", "gen_id", "global", "grant", "group",
+ "having", "hour", "if", "in", "inactive", "index", "inner",
+ "input_type", "insensitive", "insert", "int", "integer", "into", "is",
+ "isolation", "join", "key", "leading", "left", "length", "level",
+ "like", "long", "lower", "manual", "max", "maximum_segment", "merge",
+ "min", "minute", "module_name", "month", "names", "national",
+ "natural", "nchar", "no", "not", "null", "numeric", "octet_length",
+ "of", "on", "only", "open", "option", "or", "order", "outer",
+ "output_type", "overflow", "page", "pages", "page_size", "parameter",
+ "password", "plan", "position", "post_event", "precision", "primary",
+ "privileges", "procedure", "protected", "rdb$db_key", "read", "real",
+ "record_version", "recreate", "recursive", "references", "release",
+ "reserv", "reserving", "retain", "returning_values", "returns",
+ "revoke", "right", "rollback", "rows", "row_count", "savepoint",
+ "schema", "second", "segment", "select", "sensitive", "set", "shadow",
+ "shared", "singular", "size", "smallint", "snapshot", "some", "sort",
+ "sqlcode", "stability", "start", "starting", "starts", "statistics",
+ "sub_type", "sum", "suspend", "table", "then", "time", "timestamp",
+ "to", "trailing", "transaction", "trigger", "trim", "uncommitted",
+ "union", "unique", "update", "upper", "user", "using", "value",
+ "values", "varchar", "variable", "varying", "view", "wait", "when",
+ "where", "while", "with", "work", "write", "year",
+])
+
+
+class _StringType(sqltypes.String):
+ """Base for Firebird string types."""
+
+ def __init__(self, charset=None, **kw):
+ self.charset = charset
+ super(_StringType, self).__init__(**kw)
+
+
+class VARCHAR(_StringType, sqltypes.VARCHAR):
+ """Firebird VARCHAR type"""
+ __visit_name__ = 'VARCHAR'
+
+ def __init__(self, length=None, **kwargs):
+ super(VARCHAR, self).__init__(length=length, **kwargs)
+
+
+class CHAR(_StringType, sqltypes.CHAR):
+ """Firebird CHAR type"""
+ __visit_name__ = 'CHAR'
+
+ def __init__(self, length=None, **kwargs):
+ super(CHAR, self).__init__(length=length, **kwargs)
+
+
+class _FBDateTime(sqltypes.DateTime):
+ def bind_processor(self, dialect):
+ def process(value):
+ if type(value) == datetime.date:
+ return datetime.datetime(value.year, value.month, value.day)
+ else:
+ return value
+ return process
+
+colspecs = {
+ sqltypes.DateTime: _FBDateTime
+}
+
+ischema_names = {
+ 'SHORT': SMALLINT,
+ 'LONG': INTEGER,
+ 'QUAD': FLOAT,
+ 'FLOAT': FLOAT,
+ 'DATE': DATE,
+ 'TIME': TIME,
+ 'TEXT': TEXT,
+ 'INT64': BIGINT,
+ 'DOUBLE': FLOAT,
+ 'TIMESTAMP': TIMESTAMP,
+ 'VARYING': VARCHAR,
+ 'CSTRING': CHAR,
+ 'BLOB': BLOB,
+}
+
+
+# TODO: date conversion types (should be implemented as _FBDateTime,
+# _FBDate, etc. as bind/result functionality is required)
+
+class FBTypeCompiler(compiler.GenericTypeCompiler):
+ def visit_boolean(self, type_, **kw):
+ return self.visit_SMALLINT(type_, **kw)
+
+ def visit_datetime(self, type_, **kw):
+ return self.visit_TIMESTAMP(type_, **kw)
+
+ def visit_TEXT(self, type_, **kw):
+ return "BLOB SUB_TYPE 1"
+
+ def visit_BLOB(self, type_, **kw):
+ return "BLOB SUB_TYPE 0"
+
+ def _extend_string(self, type_, basic):
+ charset = getattr(type_, 'charset', None)
+ if charset is None:
+ return basic
+ else:
+ return '%s CHARACTER SET %s' % (basic, charset)
+
+ def visit_CHAR(self, type_, **kw):
+ basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
+ return self._extend_string(type_, basic)
+
+ def visit_VARCHAR(self, type_, **kw):
+ if not type_.length:
+ raise exc.CompileError(
+ "VARCHAR requires a length on dialect %s" %
+ self.dialect.name)
+ basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
+ return self._extend_string(type_, basic)
+
+
+class FBCompiler(sql.compiler.SQLCompiler):
+ """Firebird specific idiosyncrasies"""
+
+ ansi_bind_rules = True
+
+    # def visit_contains_op_binary(self, binary, operator, **kw):
+    # can't use CONTAINING because it's case-insensitive.
+
+    # def visit_notcontains_op_binary(self, binary, operator, **kw):
+    # can't use NOT CONTAINING because it's case-insensitive.
+
+ def visit_now_func(self, fn, **kw):
+ return "CURRENT_TIMESTAMP"
+
+ def visit_startswith_op_binary(self, binary, operator, **kw):
+ return '%s STARTING WITH %s' % (
+ binary.left._compiler_dispatch(self, **kw),
+ binary.right._compiler_dispatch(self, **kw))
+
+ def visit_notstartswith_op_binary(self, binary, operator, **kw):
+ return '%s NOT STARTING WITH %s' % (
+ binary.left._compiler_dispatch(self, **kw),
+ binary.right._compiler_dispatch(self, **kw))
+
+ def visit_mod_binary(self, binary, operator, **kw):
+ return "mod(%s, %s)" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
+
+ def visit_alias(self, alias, asfrom=False, **kwargs):
+ if self.dialect._version_two:
+ return super(FBCompiler, self).\
+ visit_alias(alias, asfrom=asfrom, **kwargs)
+ else:
+ # Override to not use the AS keyword which FB 1.5 does not like
+ if asfrom:
+ alias_name = isinstance(alias.name,
+ expression._truncated_label) and \
+ self._truncated_identifier("alias",
+ alias.name) or alias.name
+
+ return self.process(
+ alias.original, asfrom=asfrom, **kwargs) + \
+ " " + \
+ self.preparer.format_alias(alias, alias_name)
+ else:
+ return self.process(alias.original, **kwargs)
+
+ def visit_substring_func(self, func, **kw):
+ s = self.process(func.clauses.clauses[0])
+ start = self.process(func.clauses.clauses[1])
+ if len(func.clauses.clauses) > 2:
+ length = self.process(func.clauses.clauses[2])
+ return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
+ else:
+ return "SUBSTRING(%s FROM %s)" % (s, start)
+
+ def visit_length_func(self, function, **kw):
+ if self.dialect._version_two:
+ return "char_length" + self.function_argspec(function)
+ else:
+ return "strlen" + self.function_argspec(function)
+
+ visit_char_length_func = visit_length_func
+
+ def function_argspec(self, func, **kw):
+ # TODO: this probably will need to be
+ # narrowed to a fixed list, some no-arg functions
+ # may require parens - see similar example in the oracle
+ # dialect
+ if func.clauses is not None and len(func.clauses):
+ return self.process(func.clause_expr, **kw)
+ else:
+ return ""
+
+ def default_from(self):
+ return " FROM rdb$database"
+
+ def visit_sequence(self, seq):
+ return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
+
+ def get_select_precolumns(self, select, **kw):
+ """Called when building a ``SELECT`` statement, position is just
+ before column list Firebird puts the limit and offset right
+ after the ``SELECT``...
+ """
+
+ result = ""
+ if select._limit_clause is not None:
+ result += "FIRST %s " % self.process(select._limit_clause, **kw)
+ if select._offset_clause is not None:
+ result += "SKIP %s " % self.process(select._offset_clause, **kw)
+ if select._distinct:
+ result += "DISTINCT "
+ return result
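+    # Illustrative output (assumed values): with limit=5 and offset=10 the
+    # prefix becomes "FIRST 5 SKIP 10 ", yielding SQL such as
+    #   SELECT FIRST 5 SKIP 10 col1, col2 FROM sometable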
+
+ def limit_clause(self, select, **kw):
+ """Already taken care of in the `get_select_precolumns` method."""
+
+ return ""
+
+ def returning_clause(self, stmt, returning_cols):
+ columns = [
+ self._label_select_column(None, c, True, False, {})
+ for c in expression._select_iterables(returning_cols)
+ ]
+
+ return 'RETURNING ' + ', '.join(columns)
+
+
+class FBDDLCompiler(sql.compiler.DDLCompiler):
+ """Firebird syntactic idiosyncrasies"""
+
+ def visit_create_sequence(self, create):
+ """Generate a ``CREATE GENERATOR`` statement for the sequence."""
+
+ # no syntax for these
+ # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
+        if create.element.start is not None:
+            raise NotImplementedError(
+                "Firebird SEQUENCE doesn't support START WITH")
+        if create.element.increment is not None:
+            raise NotImplementedError(
+                "Firebird SEQUENCE doesn't support INCREMENT BY")
+
+ if self.dialect._version_two:
+ return "CREATE SEQUENCE %s" % \
+ self.preparer.format_sequence(create.element)
+ else:
+ return "CREATE GENERATOR %s" % \
+ self.preparer.format_sequence(create.element)
+
+ def visit_drop_sequence(self, drop):
+ """Generate a ``DROP GENERATOR`` statement for the sequence."""
+
+ if self.dialect._version_two:
+ return "DROP SEQUENCE %s" % \
+ self.preparer.format_sequence(drop.element)
+ else:
+ return "DROP GENERATOR %s" % \
+ self.preparer.format_sequence(drop.element)
+
+
+class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
+ """Install Firebird specific reserved words."""
+
+ reserved_words = RESERVED_WORDS
+ illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
+ ['_'])
+
+ def __init__(self, dialect):
+ super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
+
+
+class FBExecutionContext(default.DefaultExecutionContext):
+ def fire_sequence(self, seq, type_):
+ """Get the next value from the sequence using ``gen_id()``."""
+
+ return self._execute_scalar(
+ "SELECT gen_id(%s, 1) FROM rdb$database" %
+ self.dialect.identifier_preparer.format_sequence(seq),
+ type_
+ )
+
+
+class FBDialect(default.DefaultDialect):
+ """Firebird dialect"""
+
+ name = 'firebird'
+
+ max_identifier_length = 31
+
+ supports_sequences = True
+ sequences_optional = False
+ supports_default_values = True
+ postfetch_lastrowid = False
+
+ supports_native_boolean = False
+
+ requires_name_normalize = True
+ supports_empty_insert = False
+
+ statement_compiler = FBCompiler
+ ddl_compiler = FBDDLCompiler
+ preparer = FBIdentifierPreparer
+ type_compiler = FBTypeCompiler
+ execution_ctx_cls = FBExecutionContext
+
+ colspecs = colspecs
+ ischema_names = ischema_names
+
+ construct_arguments = []
+
+    # defaults to dialect version 3; the actual value is
+    # autodetected from the server upon
+    # first connect
+ _version_two = True
+
+ def initialize(self, connection):
+ super(FBDialect, self).initialize(connection)
+ self._version_two = ('firebird' in self.server_version_info and
+ self.server_version_info >= (2, )
+ ) or \
+ ('interbase' in self.server_version_info and
+ self.server_version_info >= (6, )
+ )
+
+ if not self._version_two:
+ # TODO: whatever other pre < 2.0 stuff goes here
+ self.ischema_names = ischema_names.copy()
+ self.ischema_names['TIMESTAMP'] = sqltypes.DATE
+ self.colspecs = {
+ sqltypes.DateTime: sqltypes.DATE
+ }
+
+ self.implicit_returning = self._version_two and \
+ self.__dict__.get('implicit_returning', True)
+
+ def normalize_name(self, name):
+ # Remove trailing spaces: FB uses a CHAR() type,
+ # that is padded with spaces
+ name = name and name.rstrip()
+ if name is None:
+ return None
+ elif name.upper() == name and \
+ not self.identifier_preparer._requires_quotes(name.lower()):
+ return name.lower()
+ elif name.lower() == name:
+ return quoted_name(name, quote=True)
+ else:
+ return name
+
+ def denormalize_name(self, name):
+ if name is None:
+ return None
+ elif name.lower() == name and \
+ not self.identifier_preparer._requires_quotes(name.lower()):
+ return name.upper()
+ else:
+ return name
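+    # Hedged sketch of the round-trip above (illustrative): Firebird stores
+    # unquoted identifiers in upper case, so
+    #   normalize_name("EMPLOYEE")   -> "employee"
+    #   denormalize_name("employee") -> "EMPLOYEE"
+    # while mixed-case names pass through unchanged.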
+
+ def has_table(self, connection, table_name, schema=None):
+ """Return ``True`` if the given table exists, ignoring
+ the `schema`."""
+
+ tblqry = """
+ SELECT 1 AS has_table FROM rdb$database
+ WHERE EXISTS (SELECT rdb$relation_name
+ FROM rdb$relations
+ WHERE rdb$relation_name=?)
+ """
+ c = connection.execute(tblqry, [self.denormalize_name(table_name)])
+ return c.first() is not None
+
+ def has_sequence(self, connection, sequence_name, schema=None):
+ """Return ``True`` if the given sequence (generator) exists."""
+
+ genqry = """
+ SELECT 1 AS has_sequence FROM rdb$database
+ WHERE EXISTS (SELECT rdb$generator_name
+ FROM rdb$generators
+ WHERE rdb$generator_name=?)
+ """
+ c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
+ return c.first() is not None
+
+ @reflection.cache
+ def get_table_names(self, connection, schema=None, **kw):
+ # there are two queries commonly mentioned for this.
+ # this one, using view_blr, is at the Firebird FAQ among other places:
+ # http://www.firebirdfaq.org/faq174/
+ s = """
+ select rdb$relation_name
+ from rdb$relations
+ where rdb$view_blr is null
+ and (rdb$system_flag is null or rdb$system_flag = 0);
+ """
+
+ # the other query is this one. It's not clear if there's really
+ # any difference between these two. This link:
+ # http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
+ # states them as interchangeable. Some discussion at [ticket:2898]
+ # SELECT DISTINCT rdb$relation_name
+ # FROM rdb$relation_fields
+ # WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
+
+ return [self.normalize_name(row[0]) for row in connection.execute(s)]
+
+ @reflection.cache
+ def get_view_names(self, connection, schema=None, **kw):
+ # see http://www.firebirdfaq.org/faq174/
+ s = """
+ select rdb$relation_name
+ from rdb$relations
+ where rdb$view_blr is not null
+ and (rdb$system_flag is null or rdb$system_flag = 0);
+ """
+ return [self.normalize_name(row[0]) for row in connection.execute(s)]
+
+ @reflection.cache
+ def get_view_definition(self, connection, view_name, schema=None, **kw):
+ qry = """
+ SELECT rdb$view_source AS view_source
+ FROM rdb$relations
+ WHERE rdb$relation_name=?
+ """
+ rp = connection.execute(qry, [self.denormalize_name(view_name)])
+ row = rp.first()
+ if row:
+ return row['view_source']
+ else:
+ return None
+
+ @reflection.cache
+ def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+ # Query to extract the PK/FK constrained fields of the given table
+ keyqry = """
+ SELECT se.rdb$field_name AS fname
+ FROM rdb$relation_constraints rc
+ JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
+ WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
+ """
+ tablename = self.denormalize_name(table_name)
+ # get primary key fields
+ c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
+ pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
+ return {'constrained_columns': pkfields, 'name': None}
+
+ @reflection.cache
+ def get_column_sequence(self, connection,
+ table_name, column_name,
+ schema=None, **kw):
+ tablename = self.denormalize_name(table_name)
+ colname = self.denormalize_name(column_name)
+ # Heuristic-query to determine the generator associated to a PK field
+ genqry = """
+ SELECT trigdep.rdb$depended_on_name AS fgenerator
+ FROM rdb$dependencies tabdep
+ JOIN rdb$dependencies trigdep
+ ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
+ AND trigdep.rdb$depended_on_type=14
+ AND trigdep.rdb$dependent_type=2
+ JOIN rdb$triggers trig ON
+ trig.rdb$trigger_name=tabdep.rdb$dependent_name
+ WHERE tabdep.rdb$depended_on_name=?
+ AND tabdep.rdb$depended_on_type=0
+ AND trig.rdb$trigger_type=1
+ AND tabdep.rdb$field_name=?
+ AND (SELECT count(*)
+ FROM rdb$dependencies trigdep2
+ WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
+ """
+ genr = connection.execute(genqry, [tablename, colname]).first()
+ if genr is not None:
+ return dict(name=self.normalize_name(genr['fgenerator']))
+
+ @reflection.cache
+ def get_columns(self, connection, table_name, schema=None, **kw):
+ # Query to extract the details of all the fields of the given table
+ tblqry = """
+ SELECT r.rdb$field_name AS fname,
+ r.rdb$null_flag AS null_flag,
+ t.rdb$type_name AS ftype,
+ f.rdb$field_sub_type AS stype,
+ f.rdb$field_length/
+ COALESCE(cs.rdb$bytes_per_character,1) AS flen,
+ f.rdb$field_precision AS fprec,
+ f.rdb$field_scale AS fscale,
+ COALESCE(r.rdb$default_source,
+ f.rdb$default_source) AS fdefault
+ FROM rdb$relation_fields r
+ JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
+ JOIN rdb$types t
+ ON t.rdb$type=f.rdb$field_type AND
+ t.rdb$field_name='RDB$FIELD_TYPE'
+ LEFT JOIN rdb$character_sets cs ON
+ f.rdb$character_set_id=cs.rdb$character_set_id
+ WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
+ ORDER BY r.rdb$field_position
+ """
+ # get the PK, used to determine the eventual associated sequence
+ pk_constraint = self.get_pk_constraint(connection, table_name)
+ pkey_cols = pk_constraint['constrained_columns']
+
+ tablename = self.denormalize_name(table_name)
+ # get all of the fields for this table
+ c = connection.execute(tblqry, [tablename])
+ cols = []
+ while True:
+ row = c.fetchone()
+ if row is None:
+ break
+ name = self.normalize_name(row['fname'])
+ orig_colname = row['fname']
+
+ # get the data type
+ colspec = row['ftype'].rstrip()
+ coltype = self.ischema_names.get(colspec)
+ if coltype is None:
+ util.warn("Did not recognize type '%s' of column '%s'" %
+ (colspec, name))
+ coltype = sqltypes.NULLTYPE
+ elif issubclass(coltype, Integer) and row['fprec'] != 0:
+ coltype = NUMERIC(
+ precision=row['fprec'],
+ scale=row['fscale'] * -1)
+ elif colspec in ('VARYING', 'CSTRING'):
+ coltype = coltype(row['flen'])
+ elif colspec == 'TEXT':
+ coltype = TEXT(row['flen'])
+ elif colspec == 'BLOB':
+ if row['stype'] == 1:
+ coltype = TEXT()
+ else:
+ coltype = BLOB()
+ else:
+ coltype = coltype()
+
+ # does it have a default value?
+ defvalue = None
+ if row['fdefault'] is not None:
+ # the value comes down as "DEFAULT 'value'": there may be
+ # more than one whitespace around the "DEFAULT" keyword
+ # and it may also be lower case
+ # (see also http://tracker.firebirdsql.org/browse/CORE-356)
+ defexpr = row['fdefault'].lstrip()
+ assert defexpr[:8].rstrip().upper() == \
+ 'DEFAULT', "Unrecognized default value: %s" % \
+ defexpr
+ defvalue = defexpr[8:].strip()
+ if defvalue == 'NULL':
+ # Redundant
+ defvalue = None
+ col_d = {
+ 'name': name,
+ 'type': coltype,
+ 'nullable': not bool(row['null_flag']),
+ 'default': defvalue,
+ 'autoincrement': 'auto',
+ }
+
+ if orig_colname.lower() == orig_colname:
+ col_d['quote'] = True
+
+ # if the PK is a single field, try to see if its linked to
+ # a sequence thru a trigger
+ if len(pkey_cols) == 1 and name == pkey_cols[0]:
+ seq_d = self.get_column_sequence(connection, tablename, name)
+ if seq_d is not None:
+ col_d['sequence'] = seq_d
+
+ cols.append(col_d)
+ return cols
+
+ @reflection.cache
+ def get_foreign_keys(self, connection, table_name, schema=None, **kw):
+ # Query to extract the details of each UK/FK of the given table
+ fkqry = """
+ SELECT rc.rdb$constraint_name AS cname,
+ cse.rdb$field_name AS fname,
+ ix2.rdb$relation_name AS targetrname,
+ se.rdb$field_name AS targetfname
+ FROM rdb$relation_constraints rc
+ JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
+ JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
+ JOIN rdb$index_segments cse ON
+ cse.rdb$index_name=ix1.rdb$index_name
+ JOIN rdb$index_segments se
+ ON se.rdb$index_name=ix2.rdb$index_name
+ AND se.rdb$field_position=cse.rdb$field_position
+ WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
+ ORDER BY se.rdb$index_name, se.rdb$field_position
+ """
+ tablename = self.denormalize_name(table_name)
+
+ c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
+ fks = util.defaultdict(lambda: {
+ 'name': None,
+ 'constrained_columns': [],
+ 'referred_schema': None,
+ 'referred_table': None,
+ 'referred_columns': []
+ })
+
+ for row in c:
+ cname = self.normalize_name(row['cname'])
+ fk = fks[cname]
+ if not fk['name']:
+ fk['name'] = cname
+ fk['referred_table'] = self.normalize_name(row['targetrname'])
+ fk['constrained_columns'].append(
+ self.normalize_name(row['fname']))
+ fk['referred_columns'].append(
+ self.normalize_name(row['targetfname']))
+ return list(fks.values())
+
+ @reflection.cache
+ def get_indexes(self, connection, table_name, schema=None, **kw):
+ qry = """
+ SELECT ix.rdb$index_name AS index_name,
+ ix.rdb$unique_flag AS unique_flag,
+ ic.rdb$field_name AS field_name
+ FROM rdb$indices ix
+ JOIN rdb$index_segments ic
+ ON ix.rdb$index_name=ic.rdb$index_name
+ LEFT OUTER JOIN rdb$relation_constraints
+ ON rdb$relation_constraints.rdb$index_name =
+ ic.rdb$index_name
+ WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
+ AND rdb$relation_constraints.rdb$constraint_type IS NULL
+ ORDER BY index_name, ic.rdb$field_position
+ """
+ c = connection.execute(qry, [self.denormalize_name(table_name)])
+
+ indexes = util.defaultdict(dict)
+ for row in c:
+ indexrec = indexes[row['index_name']]
+ if 'name' not in indexrec:
+ indexrec['name'] = self.normalize_name(row['index_name'])
+ indexrec['column_names'] = []
+ indexrec['unique'] = bool(row['unique_flag'])
+
+ indexrec['column_names'].append(
+ self.normalize_name(row['field_name']))
+
+ return list(indexes.values())
diff --git a/app/lib/sqlalchemy/dialects/firebird/fdb.py b/app/lib/sqlalchemy/dialects/firebird/fdb.py
new file mode 100644
index 0000000..d590df7
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/firebird/fdb.py
@@ -0,0 +1,118 @@
+# firebird/fdb.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: firebird+fdb
+ :name: fdb
+    :dbapi: fdb
+ :connectstring: firebird+fdb://user:password@host:port/path/to/db\
+[?key=value&key=value...]
+ :url: http://pypi.python.org/pypi/fdb/
+
+    fdb is a kinterbasdb-compatible DBAPI for Firebird.
+
+ .. versionadded:: 0.8 - Support for the fdb Firebird driver.
+
+ .. versionchanged:: 0.9 - The fdb dialect is now the default dialect
+ under the ``firebird://`` URL space, as ``fdb`` is now the official
+ Python driver for Firebird.
+
+Arguments
+----------
+
+The ``fdb`` dialect is based on the
+:mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect, however does not
+accept every argument that Kinterbasdb does.
+
+* ``enable_rowcount`` - True by default, setting this to False disables
+ the usage of "cursor.rowcount" with the
+ Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
+ after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
+ ResultProxy will return -1 for result.rowcount. The rationale here is
+ that Kinterbasdb requires a second round trip to the database when
+ .rowcount is called - since SQLA's resultproxy automatically closes
+ the cursor after a non-result-returning statement, rowcount must be
+ called, if at all, before the result object is returned. Additionally,
+ cursor.rowcount may not return correct results with older versions
+ of Firebird, and setting this flag to False will also cause the
+ SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
+ per-execution basis using the ``enable_rowcount`` option with
+ :meth:`.Connection.execution_options`::
+
+ conn = engine.connect().execution_options(enable_rowcount=True)
+ r = conn.execute(stmt)
+ print r.rowcount
+
+* ``retaining`` - False by default. Setting this to True will pass the
+ ``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()``
+ methods of the DBAPI connection, which can improve performance in some
+ situations, but apparently with significant caveats.
+ Please read the fdb and/or kinterbasdb DBAPI documentation in order to
+ understand the implications of this flag.
+
+ .. versionadded:: 0.8.2 - ``retaining`` keyword argument specifying
+ transaction retaining behavior - in 0.8 it defaults to ``True``
+ for backwards compatibility.
+
+ .. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``.
+ In 0.8 it defaulted to ``True``.
+
+ .. seealso::
+
+ http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions
+ - information on the "retaining" flag.
+
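+Both flags are dialect-level arguments, so they can also be set at engine
+configuration time; a minimal sketch (the connection string is a
+placeholder)::
+
+    from sqlalchemy import create_engine
+
+    # 'enable_rowcount' and 'retaining' are consumed by the dialect's
+    # __init__() rather than passed to the DBAPI's connect()
+    engine = create_engine(
+        "firebird+fdb://user:password@localhost/path/to/db",
+        enable_rowcount=False,
+        retaining=True,
+    )
+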
+"""
+
+from .kinterbasdb import FBDialect_kinterbasdb
+from ... import util
+
+
+class FBDialect_fdb(FBDialect_kinterbasdb):
+
+ def __init__(self, enable_rowcount=True,
+ retaining=False, **kwargs):
+ super(FBDialect_fdb, self).__init__(
+ enable_rowcount=enable_rowcount,
+ retaining=retaining, **kwargs)
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('fdb')
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ if opts.get('port'):
+ opts['host'] = "%s/%s" % (opts['host'], opts['port'])
+ del opts['port']
+ opts.update(url.query)
+
+ util.coerce_kw_type(opts, 'type_conv', int)
+
+ return ([], opts)
+
+ def _get_server_version_info(self, connection):
+ """Get the version of the Firebird server used by a connection.
+
+ Returns a tuple of (`major`, `minor`, `build`), three integers
+ representing the version of the attached server.
+ """
+
+ # This is the simpler approach (the other uses the services api),
+ # that for backward compatibility reasons returns a string like
+ # LI-V6.3.3.12981 Firebird 2.0
+ # where the first version is a fake one resembling the old
+ # Interbase signature.
+
+ isc_info_firebird_version = 103
+ fbconn = connection.connection
+
+ version = fbconn.db_info(isc_info_firebird_version)
+
+ return self._parse_version_info(version)
+
+dialect = FBDialect_fdb
diff --git a/app/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/app/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
new file mode 100644
index 0000000..b7c1563
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
@@ -0,0 +1,184 @@
+# firebird/kinterbasdb.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: firebird+kinterbasdb
+ :name: kinterbasdb
+ :dbapi: kinterbasdb
+ :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db\
+[?key=value&key=value...]
+ :url: http://firebirdsql.org/index.php?op=devel&sub=python
+
+Arguments
+----------
+
+The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
+arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
+In addition, it also accepts the following:
+
+* ``type_conv`` - select the kind of mapping done on the types: by default
+ SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
+ the linked documents below for further information.
+
+* ``concurrency_level`` - set the backend policy with regards to threading
+ issues: by default SQLAlchemy uses policy 1. See the linked documents
+ below for further information.
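+
+Both are dialect-level arguments; a minimal sketch (the connection string
+is a placeholder)::
+
+    from sqlalchemy import create_engine
+
+    # type_conv and concurrency_level are forwarded to kinterbasdb.init()
+    # on first connect; the values shown are the dialect defaults
+    engine = create_engine(
+        "firebird+kinterbasdb://user:password@localhost/path/to/db",
+        type_conv=200,
+        concurrency_level=1,
+    )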
+
+.. seealso::
+
+ http://sourceforge.net/projects/kinterbasdb
+
+ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
+
+ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
+
+"""
+
+from .base import FBDialect, FBExecutionContext
+from ... import util, types as sqltypes
+from re import match
+import decimal
+
+
+class _kinterbasdb_numeric(object):
+ def bind_processor(self, dialect):
+ def process(value):
+ if isinstance(value, decimal.Decimal):
+ return str(value)
+ else:
+ return value
+ return process
+
+
+class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
+ pass
+
+
+class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
+ pass
+
+
+class FBExecutionContext_kinterbasdb(FBExecutionContext):
+ @property
+ def rowcount(self):
+ if self.execution_options.get('enable_rowcount',
+ self.dialect.enable_rowcount):
+ return self.cursor.rowcount
+ else:
+ return -1
+
+
+class FBDialect_kinterbasdb(FBDialect):
+ driver = 'kinterbasdb'
+ supports_sane_rowcount = False
+ supports_sane_multi_rowcount = False
+ execution_ctx_cls = FBExecutionContext_kinterbasdb
+
+ supports_native_decimal = True
+
+ colspecs = util.update_copy(
+ FBDialect.colspecs,
+ {
+ sqltypes.Numeric: _FBNumeric_kinterbasdb,
+ sqltypes.Float: _FBFloat_kinterbasdb,
+ }
+
+ )
+
+ def __init__(self, type_conv=200, concurrency_level=1,
+ enable_rowcount=True,
+ retaining=False, **kwargs):
+ super(FBDialect_kinterbasdb, self).__init__(**kwargs)
+ self.enable_rowcount = enable_rowcount
+ self.type_conv = type_conv
+ self.concurrency_level = concurrency_level
+ self.retaining = retaining
+ if enable_rowcount:
+ self.supports_sane_rowcount = True
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('kinterbasdb')
+
+ def do_execute(self, cursor, statement, parameters, context=None):
+        # kinterbasdb does not accept None, but wants an empty list
+ # when there are no arguments.
+ cursor.execute(statement, parameters or [])
+
+ def do_rollback(self, dbapi_connection):
+ dbapi_connection.rollback(self.retaining)
+
+ def do_commit(self, dbapi_connection):
+ dbapi_connection.commit(self.retaining)
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ if opts.get('port'):
+ opts['host'] = "%s/%s" % (opts['host'], opts['port'])
+ del opts['port']
+ opts.update(url.query)
+
+ util.coerce_kw_type(opts, 'type_conv', int)
+
+ type_conv = opts.pop('type_conv', self.type_conv)
+ concurrency_level = opts.pop('concurrency_level',
+ self.concurrency_level)
+
+ if self.dbapi is not None:
+ initialized = getattr(self.dbapi, 'initialized', None)
+ if initialized is None:
+ # CVS rev 1.96 changed the name of the attribute:
+ # http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
+ # Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
+ initialized = getattr(self.dbapi, '_initialized', False)
+ if not initialized:
+ self.dbapi.init(type_conv=type_conv,
+ concurrency_level=concurrency_level)
+ return ([], opts)
+
+ def _get_server_version_info(self, connection):
+ """Get the version of the Firebird server used by a connection.
+
+ Returns a tuple of (`major`, `minor`, `build`), three integers
+ representing the version of the attached server.
+ """
+
+ # This is the simpler approach (the other uses the services api),
+ # that for backward compatibility reasons returns a string like
+ # LI-V6.3.3.12981 Firebird 2.0
+ # where the first version is a fake one resembling the old
+ # Interbase signature.
+
+ fbconn = connection.connection
+ version = fbconn.server_version
+
+ return self._parse_version_info(version)
+
+ def _parse_version_info(self, version):
+ m = match(
+ r'\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
+ if not m:
+ raise AssertionError(
+ "Could not determine version from string '%s'" % version)
+
+        if m.group(5) is not None:
+ return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
+ else:
+ return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase'])
+
+ def is_disconnect(self, e, connection, cursor):
+ if isinstance(e, (self.dbapi.OperationalError,
+ self.dbapi.ProgrammingError)):
+ msg = str(e)
+ return ('Unable to complete network request to host' in msg or
+ 'Invalid connection state' in msg or
+ 'Invalid cursor state' in msg or
+ 'connection shutdown' in msg)
+ else:
+ return False
+
+dialect = FBDialect_kinterbasdb
diff --git a/app/lib/sqlalchemy/dialects/mssql/__init__.py b/app/lib/sqlalchemy/dialects/mssql/__init__.py
new file mode 100644
index 0000000..6b70df3
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/__init__.py
@@ -0,0 +1,27 @@
+# mssql/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
+ pymssql, zxjdbc, mxodbc
+
+base.dialect = pyodbc.dialect
+
+from sqlalchemy.dialects.mssql.base import \
+ INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \
+ NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\
+ DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \
+ BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\
+ MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect
+
+
+__all__ = (
+ 'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
+ 'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
+ 'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
+ 'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
+ 'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
+)
diff --git a/app/lib/sqlalchemy/dialects/mssql/adodbapi.py b/app/lib/sqlalchemy/dialects/mssql/adodbapi.py
new file mode 100644
index 0000000..221bf50
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/adodbapi.py
@@ -0,0 +1,87 @@
+# mssql/adodbapi.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: mssql+adodbapi
+ :name: adodbapi
+ :dbapi: adodbapi
+    :connectstring: mssql+adodbapi://<username>:<password>@<dsnname>
+ :url: http://adodbapi.sourceforge.net/
+
+.. note::
+
+    The adodbapi dialect is not implemented for SQLAlchemy versions 0.6
+    and above at this time.
+
+"""
+import datetime
+from sqlalchemy import types as sqltypes, util
+from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
+import sys
+
+
+class MSDateTime_adodbapi(MSDateTime):
+ def result_processor(self, dialect, coltype):
+ def process(value):
+ # adodbapi will return datetimes with empty time
+ # values as datetime.date() objects.
+ # Promote them back to full datetime.datetime()
+ if type(value) is datetime.date:
+ return datetime.datetime(value.year, value.month, value.day)
+ return value
+ return process
+
+
+class MSDialect_adodbapi(MSDialect):
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = True
+ supports_unicode = sys.maxunicode == 65535
+ supports_unicode_statements = True
+ driver = 'adodbapi'
+
+ @classmethod
+ def import_dbapi(cls):
+ import adodbapi as module
+ return module
+
+ colspecs = util.update_copy(
+ MSDialect.colspecs,
+ {
+ sqltypes.DateTime: MSDateTime_adodbapi
+ }
+ )
+
+ def create_connect_args(self, url):
+ def check_quote(token):
+ if ";" in str(token):
+ token = "'%s'" % token
+ return token
+
+ keys = dict(
+ (k, check_quote(v)) for k, v in url.query.items()
+ )
+
+ connectors = ["Provider=SQLOLEDB"]
+ if 'port' in keys:
+ connectors.append("Data Source=%s, %s" %
+ (keys.get("host"), keys.get("port")))
+ else:
+ connectors.append("Data Source=%s" % keys.get("host"))
+ connectors.append("Initial Catalog=%s" % keys.get("database"))
+ user = keys.get("user")
+ if user:
+ connectors.append("User Id=%s" % user)
+ connectors.append("Password=%s" % keys.get("password", ""))
+ else:
+ connectors.append("Integrated Security=SSPI")
+ return [[";".join(connectors)], {}]
+
+ def is_disconnect(self, e, connection, cursor):
+ return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
+ "'connection failure'" in str(e)
+
+dialect = MSDialect_adodbapi
diff --git a/app/lib/sqlalchemy/dialects/mssql/base.py b/app/lib/sqlalchemy/dialects/mssql/base.py
new file mode 100644
index 0000000..6975754
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/base.py
@@ -0,0 +1,2064 @@
+# mssql/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: mssql
+ :name: Microsoft SQL Server
+
+
+Auto Increment Behavior
+-----------------------
+
+SQL Server provides so-called "auto incrementing" behavior using the
+``IDENTITY`` construct, which can be placed on an integer primary key.
+SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
+described at :paramref:`.Column.autoincrement`; this means
+that by default, the first integer primary key column in a :class:`.Table`
+will be considered to be the identity column and will generate DDL as such::
+
+ from sqlalchemy import Table, MetaData, Column, Integer
+
+ m = MetaData()
+ t = Table('t', m,
+ Column('id', Integer, primary_key=True),
+ Column('x', Integer))
+ m.create_all(engine)
+
+The above example will generate DDL as:
+
+.. sourcecode:: sql
+
+ CREATE TABLE t (
+ id INTEGER NOT NULL IDENTITY(1,1),
+ x INTEGER NULL,
+ PRIMARY KEY (id)
+ )
+
+For the case where this default generation of ``IDENTITY`` is not desired,
+specify ``autoincrement=False`` on all integer primary key columns::
+
+ m = MetaData()
+ t = Table('t', m,
+ Column('id', Integer, primary_key=True, autoincrement=False),
+ Column('x', Integer))
+ m.create_all(engine)
+
+.. note::
+
+ An INSERT statement which refers to an explicit value for such
+ a column is prohibited by SQL Server, however SQLAlchemy will detect this
+ and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
+ time. As this is not a high performing process, care should be taken to
+ set the ``autoincrement`` flag appropriately for columns that will not
+ actually require IDENTITY behavior.
+
+Controlling "Start" and "Increment"
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Specific control over the parameters of the ``IDENTITY`` value is supported
+using the :class:`.schema.Sequence` object. While this object normally
+represents an explicit "sequence" for supporting backends, on SQL Server it is
+re-purposed to specify behavior regarding the identity column, including
+support of the "start" and "increment" values::
+
+ from sqlalchemy import Table, Integer, Sequence, Column
+
+ Table('test', metadata,
+ Column('id', Integer,
+ Sequence('blah', start=100, increment=10),
+ primary_key=True),
+ Column('name', String(20))
+ ).create(some_engine)
+
+would yield:
+
+.. sourcecode:: sql
+
+ CREATE TABLE test (
+ id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
+ name VARCHAR(20) NULL,
+ )
+
+Note that the ``start`` and ``increment`` values for sequences are
+optional and will default to 1,1.
+
+INSERT behavior
+^^^^^^^^^^^^^^^^
+
+Handling of the ``IDENTITY`` column at INSERT time involves two key
+techniques. The most common is being able to fetch the "last inserted value"
+for a given ``IDENTITY`` column, a process which SQLAlchemy performs
+implicitly in many cases, most importantly within the ORM.
+
+The process for fetching this value has several variants:
+
+* In the vast majority of cases, RETURNING is used in conjunction with INSERT
+ statements on SQL Server in order to get newly generated primary key values:
+
+ .. sourcecode:: sql
+
+ INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
+
+* When RETURNING is not available or has been disabled via
+ ``implicit_returning=False``, either the ``scope_identity()`` function or
+ the ``@@identity`` variable is used; behavior varies by backend:
+
+ * when using PyODBC, the phrase ``; select scope_identity()`` will be
+ appended to the end of the INSERT statement; a second result set will be
+ fetched in order to receive the value. Given a table as::
+
+ t = Table('t', m, Column('id', Integer, primary_key=True),
+ Column('x', Integer),
+ implicit_returning=False)
+
+ an INSERT will look like:
+
+ .. sourcecode:: sql
+
+ INSERT INTO t (x) VALUES (?); select scope_identity()
+
+ * Other dialects such as pymssql will call upon
+ ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
+ statement. If the flag ``use_scope_identity=False`` is passed to
+ :func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
+ is used instead.
+
+A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
+that refers to the identity column explicitly. The SQLAlchemy dialect will
+detect when an INSERT construct, created using a core :func:`.insert`
+construct (not a plain string SQL), refers to the identity column, and
+in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
+statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
+execution. Given this example::
+
+ m = MetaData()
+ t = Table('t', m, Column('id', Integer, primary_key=True),
+ Column('x', Integer))
+ m.create_all(engine)
+
+ engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
+
+The above column will be created with IDENTITY, however the INSERT statement
+we emit is specifying explicit values. In the echo output we can see
+how SQLAlchemy handles this:
+
+.. sourcecode:: sql
+
+ CREATE TABLE t (
+ id INTEGER NOT NULL IDENTITY(1,1),
+ x INTEGER NULL,
+ PRIMARY KEY (id)
+ )
+
+ COMMIT
+ SET IDENTITY_INSERT t ON
+ INSERT INTO t (id, x) VALUES (?, ?)
+ ((1, 1), (2, 2))
+ SET IDENTITY_INSERT t OFF
+ COMMIT
+
+
+This is an auxiliary use case suitable for testing and bulk insert
+scenarios.
+
+MAX on VARCHAR / NVARCHAR
+-------------------------
+
+SQL Server supports the special string "MAX" within the
+:class:`.sqltypes.VARCHAR` and :class:`.sqltypes.NVARCHAR` datatypes,
+to indicate "maximum length possible". The dialect currently handles this as
+a length of "None" in the base type, rather than supplying a
+dialect-specific version of these types, so that a base type
+specified as ``VARCHAR(None)`` can assume "unlengthed" behavior on
+more than one backend without using dialect-specific types.
+
+To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
+
+ my_table = Table(
+ 'my_table', metadata,
+ Column('my_data', VARCHAR(None)),
+ Column('my_n_data', NVARCHAR(None))
+ )
+
+
+Collation Support
+-----------------
+
+Character collations are supported by the base string types,
+specified by the string argument "collation"::
+
+ from sqlalchemy import VARCHAR
+ Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
+
+When such a column is associated with a :class:`.Table`, the
+CREATE TABLE statement for this column will yield::
+
+ login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
+
+.. versionadded:: 0.8 Character collations are now part of the base string
+ types.
+
+LIMIT/OFFSET Support
+--------------------
+
+MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
+supported directly through the ``TOP`` Transact SQL keyword::
+
+    select(...).limit(n)
+
+will yield::
+
+ SELECT TOP n
+
+If using SQL Server 2005 or above, LIMIT with OFFSET
+support is available through the ``ROW_NUMBER OVER`` construct.
+For versions below 2005, LIMIT with OFFSET usage will fail.
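+
+Because such queries are rewritten using ``ROW_NUMBER() OVER``, an ORDER BY
+is required whenever an OFFSET is present; a minimal sketch::
+
+    from sqlalchemy import MetaData, Table, Column, Integer, select
+
+    some_table = Table('some_table', MetaData(),
+                       Column('id', Integer, primary_key=True))
+
+    # without the order_by(), compiling this on MSSQL raises CompileError
+    stmt = select([some_table]).order_by(some_table.c.id).\
+        limit(10).offset(20)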
+
+.. _mssql_isolation_level:
+
+Transaction Isolation Level
+---------------------------
+
+All SQL Server dialects support setting of transaction isolation level
+both via a dialect-specific parameter
+:paramref:`.create_engine.isolation_level`
+accepted by :func:`.create_engine`,
+as well as the :paramref:`.Connection.execution_options.isolation_level`
+argument as passed to
+:meth:`.Connection.execution_options`. This feature works by issuing the
+command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
+each new connection.
+
+To set isolation level using :func:`.create_engine`::
+
+ engine = create_engine(
+ "mssql+pyodbc://scott:tiger@ms_2008",
+ isolation_level="REPEATABLE READ"
+ )
+
+To set using per-connection execution options::
+
+ connection = engine.connect()
+ connection = connection.execution_options(
+ isolation_level="READ COMMITTED"
+ )
+
+Valid values for ``isolation_level`` include:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``SNAPSHOT`` - specific to SQL Server
+
+.. versionadded:: 1.1 support for isolation level setting on Microsoft
+ SQL Server.
+
+
+Nullability
+-----------
+MSSQL has support for three levels of column nullability. The default
+nullability allows nulls and is explicit in the CREATE TABLE
+construct::
+
+ name VARCHAR(20) NULL
+
+If ``nullable=None`` is specified then no specification is made. In
+other words the database's configured default is used. This will
+render::
+
+ name VARCHAR(20)
+
+If ``nullable`` is ``True`` or ``False`` then the column will be
+``NULL`` or ``NOT NULL`` respectively.
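+
+A minimal sketch of all three settings::
+
+    from sqlalchemy import Column, String
+
+    Column('a', String(20))                  # renders: a VARCHAR(20) NULL
+    Column('b', String(20), nullable=False)  # renders: b VARCHAR(20) NOT NULL
+    Column('c', String(20), nullable=None)   # renders: c VARCHAR(20)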
+
+Date / Time Handling
+--------------------
+DATE and TIME are supported. Bind parameters are converted
+to datetime.datetime() objects as required by most MSSQL drivers,
+and results are processed from strings if needed.
+The DATE and TIME types are not available for MSSQL 2005 and
+previous - if a server version below 2008 is detected, DDL
+for these types will be issued as DATETIME.
+
+.. _mssql_large_type_deprecation:
+
+Large Text/Binary Type Deprecation
+----------------------------------
+
+Per the SQL Server 2012/2014 documentation, the ``NTEXT``, ``TEXT`` and
+``IMAGE`` datatypes are to be removed from SQL Server
+in a future release. SQLAlchemy normally relates these types to the
+:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.
+
+In order to accommodate this change, a new flag ``deprecate_large_types``
+is added to the dialect, which will be automatically set based on detection
+of the server version in use, if not otherwise set by the user. The
+behavior of this flag is as follows:
+
+* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
+ :class:`.LargeBinary` datatypes, when used to render DDL, will render the
+ types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
+ respectively. This is a new behavior as of the addition of this flag.
+
+* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
+ :class:`.LargeBinary` datatypes, when used to render DDL, will render the
+ types ``NTEXT``, ``TEXT``, and ``IMAGE``,
+ respectively. This is the long-standing behavior of these types.
+
+* The flag begins with the value ``None``, before a database connection is
+ established. If the dialect is used to render DDL without the flag being
+ set, it is interpreted the same as ``False``.
+
+* On first connection, the dialect detects if SQL Server version 2012 or greater
+ is in use; if the flag is still at ``None``, it sets it to ``True`` or
+ ``False`` based on whether 2012 or greater is detected.
+
+* The flag can be set to either ``True`` or ``False`` when the dialect
+ is created, typically via :func:`.create_engine`::
+
+ eng = create_engine("mssql+pymssql://user:pass@host/db",
+ deprecate_large_types=True)
+
+* Complete control over whether the "old" or "new" types are rendered is
+ available in all SQLAlchemy versions by using the UPPERCASE type objects
+ instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
+ :class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain
+ fixed and always output exactly that type.
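+
+  For example, a minimal sketch; these render the same DDL regardless of
+  the ``deprecate_large_types`` setting::
+
+      from sqlalchemy import Column, NVARCHAR
+      from sqlalchemy.dialects.mssql import NTEXT
+
+      Column('a', NTEXT())         # always renders NTEXT
+      Column('b', NVARCHAR(None))  # always renders NVARCHAR(max)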
+
+.. versionadded:: 1.0.0
+
+.. _legacy_schema_rendering:
+
+Legacy Schema Mode
+------------------
+
+Very old versions of the MSSQL dialect introduced the behavior such that a
+schema-qualified table would be auto-aliased when used in a
+SELECT statement; given a table::
+
+ account_table = Table(
+ 'account', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('info', String(100)),
+ schema="customer_schema"
+ )
+
+this legacy mode of rendering would assume that "customer_schema.account"
+would not be accepted by all parts of the SQL statement, as illustrated
+below::
+
+ >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
+ >>> print(account_table.select().compile(eng))
+ SELECT account_1.id, account_1.info
+ FROM customer_schema.account AS account_1
+
+This mode of behavior is now off by default, as it appears to have served
+no purpose; however in the case that legacy applications rely upon it,
+it is available using the ``legacy_schema_aliasing`` argument to
+:func:`.create_engine` as illustrated above.
+
+.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
+ in version 1.0.5 to allow disabling of legacy mode for schemas now
+ defaults to False.
+
+
+.. _mssql_indexes:
+
+Clustered Index Support
+-----------------------
+
+The MSSQL dialect supports clustered indexes (and primary keys) via the
+``mssql_clustered`` option. This option is available to :class:`.Index`,
+:class:`.UniqueConstraint`, and :class:`.PrimaryKeyConstraint`.
+
+To generate a clustered index::
+
+ Index("my_index", table.c.x, mssql_clustered=True)
+
+which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
+
+To generate a clustered primary key use::
+
+ Table('my_table', metadata,
+ Column('x', ...),
+ Column('y', ...),
+ PrimaryKeyConstraint("x", "y", mssql_clustered=True))
+
+which will render the table, for example, as::
+
+ CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
+ PRIMARY KEY CLUSTERED (x, y))
+
+Similarly, we can generate a clustered unique constraint using::
+
+ Table('my_table', metadata,
+ Column('x', ...),
+ Column('y', ...),
+ PrimaryKeyConstraint("x"),
+ UniqueConstraint("y", mssql_clustered=True),
+ )
+
+To explicitly request a non-clustered primary key (for example, when
+a separate clustered index is desired), use::
+
+ Table('my_table', metadata,
+ Column('x', ...),
+ Column('y', ...),
+ PrimaryKeyConstraint("x", "y", mssql_clustered=False))
+
+which will render the table, for example, as::
+
+ CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
+ PRIMARY KEY NONCLUSTERED (x, y))
+
+.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
+ to None, rather than False. ``mssql_clustered=False`` now explicitly
+ renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
+ clause entirely, allowing SQL Server defaults to take effect.
+
+
+MSSQL-Specific Index Options
+-----------------------------
+
+In addition to clustering, the MSSQL dialect supports other special options
+for :class:`.Index`.
+
+INCLUDE
+^^^^^^^
+
+The ``mssql_include`` option renders INCLUDE(colname) for the given string
+names::
+
+ Index("my_index", table.c.x, mssql_include=['y'])
+
+would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``.
+
+.. versionadded:: 0.8
+
+Index ordering
+^^^^^^^^^^^^^^
+
+Index ordering is available via functional expressions, such as::
+
+ Index("my_index", table.c.x.desc())
+
+would render the index as ``CREATE INDEX my_index ON table (x DESC)``.
+
+.. versionadded:: 0.8
+
+.. seealso::
+
+ :ref:`schema_indexes_functional`
+
+Compatibility Levels
+--------------------
+MSSQL supports the notion of setting compatibility levels at the
+database level. This allows, for instance, to run a database that
+is compatible with SQL2000 while running on a SQL2005 database
+server. ``server_version_info`` will always return the database
+server version information (in this case SQL2005) and not the
+compatibility level information. Because of this, if running under
+a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
+statements that the database server cannot parse.
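+
+A sketch of checking what the dialect detected (the DSN is a placeholder)::
+
+    engine = create_engine("mssql+pyodbc://scott:tiger@mydsn")
+    conn = engine.connect()
+    # reflects the actual server version, not the compatibility level
+    print(engine.dialect.server_version_info)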
+
+Triggers
+--------
+
+SQLAlchemy by default uses OUTPUT INSERTED to get at newly
+generated primary key values via IDENTITY columns or other
+server side defaults. MS-SQL does not
+allow the usage of OUTPUT INSERTED on tables that have triggers.
+To disable the usage of OUTPUT INSERTED on a per-table basis,
+specify ``implicit_returning=False`` for each :class:`.Table`
+which has triggers::
+
+ Table('mytable', metadata,
+ Column('id', Integer, primary_key=True),
+ # ...,
+ implicit_returning=False
+ )
+
+Declarative form::
+
+ class MyClass(Base):
+ # ...
+ __table_args__ = {'implicit_returning':False}
+
+
+This option can also be specified engine-wide using the
+``implicit_returning=False`` argument on :func:`.create_engine`.
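+
+A minimal sketch (the DSN is a placeholder)::
+
+    engine = create_engine(
+        "mssql+pyodbc://scott:tiger@mydsn", implicit_returning=False)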
+
+.. _mssql_rowcount_versioning:
+
+Rowcount Support / ORM Versioning
+---------------------------------
+
+The SQL Server drivers have very limited ability to return the number
+of rows updated from an UPDATE or DELETE statement. In particular, the
+pymssql driver has no support, whereas the pyodbc driver can only return
+this value under certain conditions.
+
+In particular, updated rowcount is not available when OUTPUT INSERTED
+is used. This impacts the SQLAlchemy ORM's versioning feature when
+server-side versioning schemes are used. When
+using pyodbc, the "implicit_returning" flag needs to be set to false
+for any ORM mapped class that uses a version_id column in conjunction with
+a server-side version generator::
+
+ class MyTable(Base):
+ __tablename__ = 'mytable'
+ id = Column(Integer, primary_key=True)
+ stuff = Column(String(10))
+ timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
+ __mapper_args__ = {
+ 'version_id_col': timestamp,
+ 'version_id_generator': False,
+ }
+ __table_args__ = {
+ 'implicit_returning': False
+ }
+
+Without the implicit_returning flag above, the UPDATE statement will
+use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
+-1, causing the versioning logic to fail.
+
+Enabling Snapshot Isolation
+---------------------------
+
+Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
+isolation mode that locks entire tables, and causes even mildly concurrent
+applications to have long held locks and frequent deadlocks.
+Enabling snapshot isolation for the database as a whole is recommended
+for modern levels of concurrency support. This is accomplished via the
+following ALTER DATABASE commands executed at the SQL prompt::
+
+ ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
+
+ ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
+
+Background on SQL Server snapshot isolation is available at
+http://msdn.microsoft.com/en-us/library/ms175095.aspx.
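+
+Once enabled at the database level, individual connections may then opt in
+to SNAPSHOT isolation via the support described in
+:ref:`mssql_isolation_level`; a minimal sketch::
+
+    connection = engine.connect().execution_options(
+        isolation_level="SNAPSHOT"
+    )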
+
+Known Issues
+------------
+
+* No support for more than one ``IDENTITY`` column per table
+* reflection of indexes does not work with versions older than
+ SQL Server 2005
+
+"""
+import datetime
+import operator
+import re
+
+from ... import sql, schema as sa_schema, exc, util
+from ...sql import compiler, expression, util as sql_util
+from ... import engine
+from ...engine import reflection, default
+from ... import types as sqltypes
+from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
+ FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
+ TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
+
+
+from ...util import update_wrapper
+from . import information_schema as ischema
+
+# http://sqlserverbuilds.blogspot.com/
+MS_2016_VERSION = (13,)
+MS_2014_VERSION = (12,)
+MS_2012_VERSION = (11,)
+MS_2008_VERSION = (10,)
+MS_2005_VERSION = (9,)
+MS_2000_VERSION = (8,)
+
+RESERVED_WORDS = set(
+ ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
+ 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
+ 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
+ 'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
+ 'containstable', 'continue', 'convert', 'create', 'cross', 'current',
+ 'current_date', 'current_time', 'current_timestamp', 'current_user',
+ 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
+ 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
+ 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
+ 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
+ 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
+ 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
+ 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
+ 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
+ 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
+ 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
+ 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
+ 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
+ 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
+ 'reconfigure', 'references', 'replication', 'restore', 'restrict',
+ 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
+ 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
+ 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
+ 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
+ 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
+ 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
+ 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
+ 'writetext',
+ ])
+
+
+class REAL(sqltypes.REAL):
+ __visit_name__ = 'REAL'
+
+ def __init__(self, **kw):
+ # REAL is a synonym for FLOAT(24) on SQL server
+ kw['precision'] = 24
+ super(REAL, self).__init__(**kw)
+
+
+class TINYINT(sqltypes.Integer):
+ __visit_name__ = 'TINYINT'
+
+
+# MSSQL DATE/TIME types have varied behavior, sometimes returning
+# strings. MSDate/TIME check for everything, and always
+# filter bind parameters into datetime objects (required by pyodbc,
+# not sure about other dialects).
+
+class _MSDate(sqltypes.Date):
+
+ def bind_processor(self, dialect):
+ def process(value):
+ if type(value) == datetime.date:
+ return datetime.datetime(value.year, value.month, value.day)
+ else:
+ return value
+ return process
+
+ _reg = re.compile(r"(\d+)-(\d+)-(\d+)")
+
+ def result_processor(self, dialect, coltype):
+ def process(value):
+ if isinstance(value, datetime.datetime):
+ return value.date()
+ elif isinstance(value, util.string_types):
+ m = self._reg.match(value)
+ if not m:
+ raise ValueError(
+ "could not parse %r as a date value" % (value, ))
+ return datetime.date(*[
+ int(x or 0)
+ for x in m.groups()
+ ])
+ else:
+ return value
+ return process
+
+
+class TIME(sqltypes.TIME):
+
+ def __init__(self, precision=None, **kwargs):
+ self.precision = precision
+ super(TIME, self).__init__()
+
+ __zero_date = datetime.date(1900, 1, 1)
+
+ def bind_processor(self, dialect):
+ def process(value):
+ if isinstance(value, datetime.datetime):
+ value = datetime.datetime.combine(
+ self.__zero_date, value.time())
+ elif isinstance(value, datetime.time):
+ value = datetime.datetime.combine(self.__zero_date, value)
+ return value
+ return process
+
+ _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
+
+ def result_processor(self, dialect, coltype):
+ def process(value):
+ if isinstance(value, datetime.datetime):
+ return value.time()
+ elif isinstance(value, util.string_types):
+ m = self._reg.match(value)
+ if not m:
+ raise ValueError(
+ "could not parse %r as a time value" % (value, ))
+ return datetime.time(*[
+ int(x or 0)
+ for x in m.groups()])
+ else:
+ return value
+ return process
+_MSTime = TIME
+
+
+class _DateTimeBase(object):
+
+ def bind_processor(self, dialect):
+ def process(value):
+ if type(value) == datetime.date:
+ return datetime.datetime(value.year, value.month, value.day)
+ else:
+ return value
+ return process
+
+
+class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
+ pass
+
+
+class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
+ __visit_name__ = 'SMALLDATETIME'
+
+
+class DATETIME2(_DateTimeBase, sqltypes.DateTime):
+ __visit_name__ = 'DATETIME2'
+
+ def __init__(self, precision=None, **kw):
+ super(DATETIME2, self).__init__(**kw)
+ self.precision = precision
+
+
+# TODO: is this not an Interval ?
+class DATETIMEOFFSET(sqltypes.TypeEngine):
+ __visit_name__ = 'DATETIMEOFFSET'
+
+ def __init__(self, precision=None, **kwargs):
+ self.precision = precision
+
+
+class _StringType(object):
+
+ """Base for MSSQL string types."""
+
+ def __init__(self, collation=None):
+ super(_StringType, self).__init__(collation=collation)
+
+
+class NTEXT(sqltypes.UnicodeText):
+
+ """MSSQL NTEXT type, for variable-length unicode text up to 2^30
+ characters."""
+
+ __visit_name__ = 'NTEXT'
+
+
+class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
+ """The MSSQL VARBINARY type.
+
+ This type extends both :class:`.types.VARBINARY` and
+ :class:`.types.LargeBinary`. In "deprecate_large_types" mode,
+ the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
+ on SQL Server.
+
+ .. versionadded:: 1.0.0
+
+ .. seealso::
+
+ :ref:`mssql_large_type_deprecation`
+
+
+
+ """
+ __visit_name__ = 'VARBINARY'
+
+
+class IMAGE(sqltypes.LargeBinary):
+ __visit_name__ = 'IMAGE'
+
+
+class BIT(sqltypes.TypeEngine):
+ __visit_name__ = 'BIT'
+
+
+class MONEY(sqltypes.TypeEngine):
+ __visit_name__ = 'MONEY'
+
+
+class SMALLMONEY(sqltypes.TypeEngine):
+ __visit_name__ = 'SMALLMONEY'
+
+
+class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
+ __visit_name__ = "UNIQUEIDENTIFIER"
+
+
+class SQL_VARIANT(sqltypes.TypeEngine):
+ __visit_name__ = 'SQL_VARIANT'
+
+# old names.
+MSDateTime = _MSDateTime
+MSDate = _MSDate
+MSReal = REAL
+MSTinyInteger = TINYINT
+MSTime = TIME
+MSSmallDateTime = SMALLDATETIME
+MSDateTime2 = DATETIME2
+MSDateTimeOffset = DATETIMEOFFSET
+MSText = TEXT
+MSNText = NTEXT
+MSString = VARCHAR
+MSNVarchar = NVARCHAR
+MSChar = CHAR
+MSNChar = NCHAR
+MSBinary = BINARY
+MSVarBinary = VARBINARY
+MSImage = IMAGE
+MSBit = BIT
+MSMoney = MONEY
+MSSmallMoney = SMALLMONEY
+MSUniqueIdentifier = UNIQUEIDENTIFIER
+MSVariant = SQL_VARIANT
+
+ischema_names = {
+ 'int': INTEGER,
+ 'bigint': BIGINT,
+ 'smallint': SMALLINT,
+ 'tinyint': TINYINT,
+ 'varchar': VARCHAR,
+ 'nvarchar': NVARCHAR,
+ 'char': CHAR,
+ 'nchar': NCHAR,
+ 'text': TEXT,
+ 'ntext': NTEXT,
+ 'decimal': DECIMAL,
+ 'numeric': NUMERIC,
+ 'float': FLOAT,
+ 'datetime': DATETIME,
+ 'datetime2': DATETIME2,
+ 'datetimeoffset': DATETIMEOFFSET,
+ 'date': DATE,
+ 'time': TIME,
+ 'smalldatetime': SMALLDATETIME,
+ 'binary': BINARY,
+ 'varbinary': VARBINARY,
+ 'bit': BIT,
+ 'real': REAL,
+ 'image': IMAGE,
+ 'timestamp': TIMESTAMP,
+ 'money': MONEY,
+ 'smallmoney': SMALLMONEY,
+ 'uniqueidentifier': UNIQUEIDENTIFIER,
+ 'sql_variant': SQL_VARIANT,
+}
+
+
+class MSTypeCompiler(compiler.GenericTypeCompiler):
+ def _extend(self, spec, type_, length=None):
+ """Extend a string-type declaration with standard SQL
+ COLLATE annotations.
+
+ """
+
+ if getattr(type_, 'collation', None):
+ collation = 'COLLATE %s' % type_.collation
+ else:
+ collation = None
+
+ if not length:
+ length = type_.length
+
+ if length:
+ spec = spec + "(%s)" % length
+
+ return ' '.join([c for c in (spec, collation)
+ if c is not None])
+
+ def visit_FLOAT(self, type_, **kw):
+ precision = getattr(type_, 'precision', None)
+ if precision is None:
+ return "FLOAT"
+ else:
+ return "FLOAT(%(precision)s)" % {'precision': precision}
+
+ def visit_TINYINT(self, type_, **kw):
+ return "TINYINT"
+
+ def visit_DATETIMEOFFSET(self, type_, **kw):
+ if type_.precision is not None:
+ return "DATETIMEOFFSET(%s)" % type_.precision
+ else:
+ return "DATETIMEOFFSET"
+
+ def visit_TIME(self, type_, **kw):
+ precision = getattr(type_, 'precision', None)
+ if precision is not None:
+ return "TIME(%s)" % precision
+ else:
+ return "TIME"
+
+ def visit_DATETIME2(self, type_, **kw):
+ precision = getattr(type_, 'precision', None)
+ if precision is not None:
+ return "DATETIME2(%s)" % precision
+ else:
+ return "DATETIME2"
+
+ def visit_SMALLDATETIME(self, type_, **kw):
+ return "SMALLDATETIME"
+
+ def visit_unicode(self, type_, **kw):
+ return self.visit_NVARCHAR(type_, **kw)
+
+ def visit_text(self, type_, **kw):
+ if self.dialect.deprecate_large_types:
+ return self.visit_VARCHAR(type_, **kw)
+ else:
+ return self.visit_TEXT(type_, **kw)
+
+ def visit_unicode_text(self, type_, **kw):
+ if self.dialect.deprecate_large_types:
+ return self.visit_NVARCHAR(type_, **kw)
+ else:
+ return self.visit_NTEXT(type_, **kw)
+
+ def visit_NTEXT(self, type_, **kw):
+ return self._extend("NTEXT", type_)
+
+ def visit_TEXT(self, type_, **kw):
+ return self._extend("TEXT", type_)
+
+ def visit_VARCHAR(self, type_, **kw):
+ return self._extend("VARCHAR", type_, length=type_.length or 'max')
+
+ def visit_CHAR(self, type_, **kw):
+ return self._extend("CHAR", type_)
+
+ def visit_NCHAR(self, type_, **kw):
+ return self._extend("NCHAR", type_)
+
+ def visit_NVARCHAR(self, type_, **kw):
+ return self._extend("NVARCHAR", type_, length=type_.length or 'max')
+
+ def visit_date(self, type_, **kw):
+ if self.dialect.server_version_info < MS_2008_VERSION:
+ return self.visit_DATETIME(type_, **kw)
+ else:
+ return self.visit_DATE(type_, **kw)
+
+ def visit_time(self, type_, **kw):
+ if self.dialect.server_version_info < MS_2008_VERSION:
+ return self.visit_DATETIME(type_, **kw)
+ else:
+ return self.visit_TIME(type_, **kw)
+
+ def visit_large_binary(self, type_, **kw):
+ if self.dialect.deprecate_large_types:
+ return self.visit_VARBINARY(type_, **kw)
+ else:
+ return self.visit_IMAGE(type_, **kw)
+
+ def visit_IMAGE(self, type_, **kw):
+ return "IMAGE"
+
+ def visit_VARBINARY(self, type_, **kw):
+ return self._extend(
+ "VARBINARY",
+ type_,
+ length=type_.length or 'max')
+
+ def visit_boolean(self, type_, **kw):
+ return self.visit_BIT(type_)
+
+ def visit_BIT(self, type_, **kw):
+ return "BIT"
+
+ def visit_MONEY(self, type_, **kw):
+ return "MONEY"
+
+ def visit_SMALLMONEY(self, type_, **kw):
+ return 'SMALLMONEY'
+
+ def visit_UNIQUEIDENTIFIER(self, type_, **kw):
+ return "UNIQUEIDENTIFIER"
+
+ def visit_SQL_VARIANT(self, type_, **kw):
+ return 'SQL_VARIANT'
+
+
+class MSExecutionContext(default.DefaultExecutionContext):
+ _enable_identity_insert = False
+ _select_lastrowid = False
+ _result_proxy = None
+ _lastrowid = None
+
+ def _opt_encode(self, statement):
+ if not self.dialect.supports_unicode_statements:
+ return self.dialect._encoder(statement)[0]
+ else:
+ return statement
+
+ def pre_exec(self):
+ """Activate IDENTITY_INSERT if needed."""
+
+ if self.isinsert:
+ tbl = self.compiled.statement.table
+ seq_column = tbl._autoincrement_column
+ insert_has_sequence = seq_column is not None
+
+ if insert_has_sequence:
+ self._enable_identity_insert = \
+ seq_column.key in self.compiled_parameters[0] or \
+ (
+ self.compiled.statement.parameters and (
+ (
+ self.compiled.statement._has_multi_parameters
+ and
+ seq_column.key in
+ self.compiled.statement.parameters[0]
+ ) or (
+ not
+ self.compiled.statement._has_multi_parameters
+ and
+ seq_column.key in
+ self.compiled.statement.parameters
+ )
+ )
+ )
+ else:
+ self._enable_identity_insert = False
+
+ self._select_lastrowid = not self.compiled.inline and \
+ insert_has_sequence and \
+ not self.compiled.returning and \
+ not self._enable_identity_insert and \
+ not self.executemany
+
+ if self._enable_identity_insert:
+ self.root_connection._cursor_execute(
+ self.cursor,
+ self._opt_encode(
+ "SET IDENTITY_INSERT %s ON" %
+ self.dialect.identifier_preparer.format_table(tbl)),
+ (),
+ self)
+
+ def post_exec(self):
+ """Disable IDENTITY_INSERT if enabled."""
+
+ conn = self.root_connection
+ if self._select_lastrowid:
+ if self.dialect.use_scope_identity:
+ conn._cursor_execute(
+ self.cursor,
+ "SELECT scope_identity() AS lastrowid", (), self)
+ else:
+ conn._cursor_execute(self.cursor,
+ "SELECT @@identity AS lastrowid",
+ (),
+ self)
+ # fetchall() ensures the cursor is consumed without closing it
+ row = self.cursor.fetchall()[0]
+ self._lastrowid = int(row[0])
+
+ if (self.isinsert or self.isupdate or self.isdelete) and \
+ self.compiled.returning:
+ self._result_proxy = engine.FullyBufferedResultProxy(self)
+
+ if self._enable_identity_insert:
+ conn._cursor_execute(
+ self.cursor,
+ self._opt_encode(
+ "SET IDENTITY_INSERT %s OFF" %
+                    self.dialect.identifier_preparer.format_table(
+ self.compiled.statement.table)),
+ (),
+ self)
+
+ def get_lastrowid(self):
+ return self._lastrowid
+
+ def handle_dbapi_exception(self, e):
+ if self._enable_identity_insert:
+ try:
+ self.cursor.execute(
+ self._opt_encode(
+ "SET IDENTITY_INSERT %s OFF" %
+                    self.dialect.identifier_preparer.format_table(
+ self.compiled.statement.table)))
+ except Exception:
+ pass
+
+ def get_result_proxy(self):
+ if self._result_proxy:
+ return self._result_proxy
+ else:
+ return engine.ResultProxy(self)
+
+
+class MSSQLCompiler(compiler.SQLCompiler):
+ returning_precedes_values = True
+
+ extract_map = util.update_copy(
+ compiler.SQLCompiler.extract_map,
+ {
+ 'doy': 'dayofyear',
+ 'dow': 'weekday',
+ 'milliseconds': 'millisecond',
+ 'microseconds': 'microsecond'
+ })
+
+ def __init__(self, *args, **kwargs):
+ self.tablealiases = {}
+ super(MSSQLCompiler, self).__init__(*args, **kwargs)
+
+ def _with_legacy_schema_aliasing(fn):
+ def decorate(self, *arg, **kw):
+ if self.dialect.legacy_schema_aliasing:
+ return fn(self, *arg, **kw)
+ else:
+ super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
+ return super_(*arg, **kw)
+ return decorate
+
+ def visit_now_func(self, fn, **kw):
+ return "CURRENT_TIMESTAMP"
+
+ def visit_current_date_func(self, fn, **kw):
+ return "GETDATE()"
+
+ def visit_length_func(self, fn, **kw):
+ return "LEN%s" % self.function_argspec(fn, **kw)
+
+ def visit_char_length_func(self, fn, **kw):
+ return "LEN%s" % self.function_argspec(fn, **kw)
+
+ def visit_concat_op_binary(self, binary, operator, **kw):
+ return "%s + %s" % \
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
+
+ def visit_true(self, expr, **kw):
+ return '1'
+
+ def visit_false(self, expr, **kw):
+ return '0'
+
+ def visit_match_op_binary(self, binary, operator, **kw):
+ return "CONTAINS (%s, %s)" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
+
+ def get_select_precolumns(self, select, **kw):
+        """MS-SQL puts TOP, its version of LIMIT, here."""
+
+ s = ""
+ if select._distinct:
+ s += "DISTINCT "
+
+ if select._simple_int_limit and not select._offset:
+ # ODBC drivers and possibly others
+ # don't support bind params in the SELECT clause on SQL Server.
+ # so have to use literal here.
+ s += "TOP %d " % select._limit
+
+ if s:
+ return s
+ else:
+ return compiler.SQLCompiler.get_select_precolumns(
+ self, select, **kw)
+
+ def get_from_hint_text(self, table, text):
+ return text
+
+ def get_crud_hint_text(self, table, text):
+ return text
+
+ def limit_clause(self, select, **kw):
+ # Limit in mssql is after the select keyword
+ return ""
+
+ def visit_select(self, select, **kwargs):
+        """Look for ``LIMIT`` and ``OFFSET`` in a select statement, and if
+        found, wrap it in a subquery with a ``row_number()`` criterion.
+
+ """
+ if (
+ (
+ not select._simple_int_limit and
+ select._limit_clause is not None
+ ) or (
+ select._offset_clause is not None and
+ not select._simple_int_offset or select._offset
+ )
+ ) and not getattr(select, '_mssql_visit', None):
+
+ # to use ROW_NUMBER(), an ORDER BY is required.
+ if not select._order_by_clause.clauses:
+ raise exc.CompileError('MSSQL requires an order_by when '
+ 'using an OFFSET or a non-simple '
+ 'LIMIT clause')
+
+ _order_by_clauses = [
+ sql_util.unwrap_label_reference(elem)
+ for elem in select._order_by_clause.clauses
+ ]
+
+ limit_clause = select._limit_clause
+ offset_clause = select._offset_clause
+ kwargs['select_wraps_for'] = select
+ select = select._generate()
+ select._mssql_visit = True
+ select = select.column(
+ sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
+ .label("mssql_rn")).order_by(None).alias()
+
+ mssql_rn = sql.column('mssql_rn')
+ limitselect = sql.select([c for c in select.c if
+ c.key != 'mssql_rn'])
+ if offset_clause is not None:
+ limitselect.append_whereclause(mssql_rn > offset_clause)
+ if limit_clause is not None:
+ limitselect.append_whereclause(
+ mssql_rn <= (limit_clause + offset_clause))
+ else:
+ limitselect.append_whereclause(
+ mssql_rn <= (limit_clause))
+ return self.process(limitselect, **kwargs)
+ else:
+ return compiler.SQLCompiler.visit_select(self, select, **kwargs)
+
+ @_with_legacy_schema_aliasing
+ def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
+ if mssql_aliased is table or iscrud:
+ return super(MSSQLCompiler, self).visit_table(table, **kwargs)
+
+ # alias schema-qualified tables
+ alias = self._schema_aliased_table(table)
+ if alias is not None:
+ return self.process(alias, mssql_aliased=table, **kwargs)
+ else:
+ return super(MSSQLCompiler, self).visit_table(table, **kwargs)
+
+ @_with_legacy_schema_aliasing
+ def visit_alias(self, alias, **kw):
+ # translate for schema-qualified table aliases
+ kw['mssql_aliased'] = alias.original
+ return super(MSSQLCompiler, self).visit_alias(alias, **kw)
+
+ @_with_legacy_schema_aliasing
+ def visit_column(self, column, add_to_result_map=None, **kw):
+ if column.table is not None and \
+ (not self.isupdate and not self.isdelete) or \
+ self.is_subquery():
+ # translate for schema-qualified table aliases
+ t = self._schema_aliased_table(column.table)
+ if t is not None:
+ converted = expression._corresponding_column_or_error(
+ t, column)
+ if add_to_result_map is not None:
+ add_to_result_map(
+ column.name,
+ column.name,
+ (column, column.name, column.key),
+ column.type
+ )
+
+ return super(MSSQLCompiler, self).\
+ visit_column(converted, **kw)
+
+ return super(MSSQLCompiler, self).visit_column(
+ column, add_to_result_map=add_to_result_map, **kw)
+
+ def _schema_aliased_table(self, table):
+ if getattr(table, 'schema', None) is not None:
+ if table not in self.tablealiases:
+ self.tablealiases[table] = table.alias()
+ return self.tablealiases[table]
+ else:
+ return None
+
+ def visit_extract(self, extract, **kw):
+ field = self.extract_map.get(extract.field, extract.field)
+ return 'DATEPART(%s, %s)' % \
+ (field, self.process(extract.expr, **kw))
+
+ def visit_savepoint(self, savepoint_stmt):
+ return "SAVE TRANSACTION %s" % \
+ self.preparer.format_savepoint(savepoint_stmt)
+
+ def visit_rollback_to_savepoint(self, savepoint_stmt):
+ return ("ROLLBACK TRANSACTION %s"
+ % self.preparer.format_savepoint(savepoint_stmt))
+
+ def visit_binary(self, binary, **kwargs):
+ """Move bind parameters to the right-hand side of an operator, where
+ possible.
+
+ """
+ if (
+ isinstance(binary.left, expression.BindParameter)
+ and binary.operator == operator.eq
+ and not isinstance(binary.right, expression.BindParameter)
+ ):
+ return self.process(
+ expression.BinaryExpression(binary.right,
+ binary.left,
+ binary.operator),
+ **kwargs)
+ return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
+
+ def returning_clause(self, stmt, returning_cols):
+
+ if self.isinsert or self.isupdate:
+ target = stmt.table.alias("inserted")
+ else:
+ target = stmt.table.alias("deleted")
+
+ adapter = sql_util.ClauseAdapter(target)
+
+ columns = [
+ self._label_select_column(None, adapter.traverse(c),
+ True, False, {})
+ for c in expression._select_iterables(returning_cols)
+ ]
+
+ return 'OUTPUT ' + ', '.join(columns)
+
+ def get_cte_preamble(self, recursive):
+ # SQL Server finds it too inconvenient to accept
+ # an entirely optional, SQL standard specified,
+ # "RECURSIVE" word with their "WITH",
+ # so here we go
+ return "WITH"
+
+ def label_select_column(self, select, column, asfrom):
+ if isinstance(column, expression.Function):
+ return column.label(None)
+ else:
+ return super(MSSQLCompiler, self).\
+ label_select_column(select, column, asfrom)
+
+ def for_update_clause(self, select):
+ # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
+ # SQLAlchemy doesn't use
+ return ''
+
+ def order_by_clause(self, select, **kw):
+ order_by = self.process(select._order_by_clause, **kw)
+
+ # MSSQL only allows ORDER BY in subqueries if there is a LIMIT
+ if order_by and (not self.is_subquery() or select._limit):
+ return " ORDER BY " + order_by
+ else:
+ return ""
+
+ def update_from_clause(self, update_stmt,
+ from_table, extra_froms,
+ from_hints,
+ **kw):
+ """Render the UPDATE..FROM clause specific to MSSQL.
+
+ In MSSQL, if the UPDATE statement involves an alias of the table to
+ be updated, then the table itself must be added to the FROM list as
+ well. Otherwise, it is optional. Here, we add it regardless.
+
+ """
+ return "FROM " + ', '.join(
+ t._compiler_dispatch(self, asfrom=True,
+ fromhints=from_hints, **kw)
+ for t in [from_table] + extra_froms)
+
+
+class MSSQLStrictCompiler(MSSQLCompiler):
+
+ """A subclass of MSSQLCompiler which disables the usage of bind
+ parameters where not allowed natively by MS-SQL.
+
+ A dialect may use this compiler on a platform where native
+ binds are used.
+
+ """
+ ansi_bind_rules = True
+
+ def visit_in_op_binary(self, binary, operator, **kw):
+ kw['literal_binds'] = True
+ return "%s IN %s" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
+
+ def visit_notin_op_binary(self, binary, operator, **kw):
+ kw['literal_binds'] = True
+ return "%s NOT IN %s" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
+
+ def render_literal_value(self, value, type_):
+ """
+ For date and datetime values, convert to a string
+ format acceptable to MSSQL. That seems to be the
+ so-called ODBC canonical date format which looks
+ like this:
+
+ yyyy-mm-dd hh:mi:ss.mmm(24h)
+
+ For other data types, call the base class implementation.
+ """
+ # datetime and date are both subclasses of datetime.date
+ if issubclass(type(value), datetime.date):
+ # SQL Server wants single quotes around the date string.
+ return "'" + str(value) + "'"
+ else:
+ return super(MSSQLStrictCompiler, self).\
+ render_literal_value(value, type_)
+
+
+class MSDDLCompiler(compiler.DDLCompiler):
+
+ def get_column_specification(self, column, **kwargs):
+ colspec = (
+ self.preparer.format_column(column) + " "
+ + self.dialect.type_compiler.process(
+ column.type, type_expression=column)
+ )
+
+ if column.nullable is not None:
+ if not column.nullable or column.primary_key or \
+ isinstance(column.default, sa_schema.Sequence):
+ colspec += " NOT NULL"
+ else:
+ colspec += " NULL"
+
+ if column.table is None:
+ raise exc.CompileError(
+ "mssql requires Table-bound columns "
+ "in order to generate DDL")
+
+ # install an IDENTITY Sequence if we either a sequence or an implicit
+ # IDENTITY column
+ if isinstance(column.default, sa_schema.Sequence):
+ if column.default.start == 0:
+ start = 0
+ else:
+ start = column.default.start or 1
+
+ colspec += " IDENTITY(%s,%s)" % (start,
+ column.default.increment or 1)
+ elif column is column.table._autoincrement_column:
+ colspec += " IDENTITY(1,1)"
+ else:
+ default = self.get_column_default_string(column)
+ if default is not None:
+ colspec += " DEFAULT " + default
+
+ return colspec
+
+ def visit_create_index(self, create, include_schema=False):
+ index = create.element
+ self._verify_index_table(index)
+ preparer = self.preparer
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+
+ # handle clustering option
+ clustered = index.dialect_options['mssql']['clustered']
+ if clustered is not None:
+ if clustered:
+ text += "CLUSTERED "
+ else:
+ text += "NONCLUSTERED "
+
+ text += "INDEX %s ON %s (%s)" \
+ % (
+ self._prepared_index_name(index,
+ include_schema=include_schema),
+ preparer.format_table(index.table),
+ ', '.join(
+ self.sql_compiler.process(expr,
+ include_table=False,
+ literal_binds=True) for
+ expr in index.expressions)
+ )
+
+ # handle other included columns
+ if index.dialect_options['mssql']['include']:
+ inclusions = [index.table.c[col]
+ if isinstance(col, util.string_types) else col
+ for col in
+ index.dialect_options['mssql']['include']
+ ]
+
+ text += " INCLUDE (%s)" \
+ % ', '.join([preparer.quote(c.name)
+ for c in inclusions])
+
+ return text
+
+ def visit_drop_index(self, drop):
+ return "\nDROP INDEX %s ON %s" % (
+ self._prepared_index_name(drop.element, include_schema=False),
+ self.preparer.format_table(drop.element.table)
+ )
+
+ def visit_primary_key_constraint(self, constraint):
+ if len(constraint) == 0:
+ return ''
+ text = ""
+ if constraint.name is not None:
+ text += "CONSTRAINT %s " % \
+ self.preparer.format_constraint(constraint)
+ text += "PRIMARY KEY "
+
+ clustered = constraint.dialect_options['mssql']['clustered']
+ if clustered is not None:
+ if clustered:
+ text += "CLUSTERED "
+ else:
+ text += "NONCLUSTERED "
+
+ text += "(%s)" % ', '.join(self.preparer.quote(c.name)
+ for c in constraint)
+ text += self.define_constraint_deferrability(constraint)
+ return text
+
+ def visit_unique_constraint(self, constraint):
+ if len(constraint) == 0:
+ return ''
+ text = ""
+ if constraint.name is not None:
+ text += "CONSTRAINT %s " % \
+ self.preparer.format_constraint(constraint)
+ text += "UNIQUE "
+
+ clustered = constraint.dialect_options['mssql']['clustered']
+ if clustered is not None:
+ if clustered:
+ text += "CLUSTERED "
+ else:
+ text += "NONCLUSTERED "
+
+ text += "(%s)" % ', '.join(self.preparer.quote(c.name)
+ for c in constraint)
+ text += self.define_constraint_deferrability(constraint)
+ return text
+
+
+class MSIdentifierPreparer(compiler.IdentifierPreparer):
+ reserved_words = RESERVED_WORDS
+
+ def __init__(self, dialect):
+ super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
+ final_quote=']')
+
+ def _escape_identifier(self, value):
+ return value
+
+ def quote_schema(self, schema, force=None):
+ """Prepare a quoted table and schema name."""
+ result = '.'.join([self.quote(x, force) for x in schema.split('.')])
+ return result
+
+
+def _db_plus_owner_listing(fn):
+ def wrap(dialect, connection, schema=None, **kw):
+ dbname, owner = _owner_plus_db(dialect, schema)
+ return _switch_db(dbname, connection, fn, dialect, connection,
+ dbname, owner, schema, **kw)
+ return update_wrapper(wrap, fn)
+
+
+def _db_plus_owner(fn):
+ def wrap(dialect, connection, tablename, schema=None, **kw):
+ dbname, owner = _owner_plus_db(dialect, schema)
+ return _switch_db(dbname, connection, fn, dialect, connection,
+ tablename, dbname, owner, schema, **kw)
+ return update_wrapper(wrap, fn)
+
+
+def _switch_db(dbname, connection, fn, *arg, **kw):
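+ # temporarily switch the connection to the target database with USE,
+ # invoke the wrapped function, then restore the original database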
+ if dbname:
+ current_db = connection.scalar("select db_name()")
+ connection.execute("use %s" % dbname)
+ try:
+ return fn(*arg, **kw)
+ finally:
+ if dbname:
+ connection.execute("use %s" % current_db)
+
+
+def _owner_plus_db(dialect, schema):
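+ # split a schema of the form "database.owner" into its two parts;
+ # a plain schema name is treated as the owner in the current database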
+ if not schema:
+ return None, dialect.default_schema_name
+ elif "." in schema:
+ return schema.split(".", 1)
+ else:
+ return None, schema
+
+
+class MSDialect(default.DefaultDialect):
+ name = 'mssql'
+ supports_default_values = True
+ supports_empty_insert = False
+ execution_ctx_cls = MSExecutionContext
+ use_scope_identity = True
+ max_identifier_length = 128
+ schema_name = "dbo"
+
+ colspecs = {
+ sqltypes.DateTime: _MSDateTime,
+ sqltypes.Date: _MSDate,
+ sqltypes.Time: TIME,
+ }
+
+ engine_config_types = default.DefaultDialect.engine_config_types.union([
+ ('legacy_schema_aliasing', util.asbool),
+ ])
+
+ ischema_names = ischema_names
+
+ supports_native_boolean = False
+ supports_unicode_binds = True
+ postfetch_lastrowid = True
+
+ server_version_info = ()
+
+ statement_compiler = MSSQLCompiler
+ ddl_compiler = MSDDLCompiler
+ type_compiler = MSTypeCompiler
+ preparer = MSIdentifierPreparer
+
+ construct_arguments = [
+ (sa_schema.PrimaryKeyConstraint, {
+ "clustered": None
+ }),
+ (sa_schema.UniqueConstraint, {
+ "clustered": None
+ }),
+ (sa_schema.Index, {
+ "clustered": None,
+ "include": None
+ })
+ ]
+
+ def __init__(self,
+ query_timeout=None,
+ use_scope_identity=True,
+ max_identifier_length=None,
+ schema_name="dbo",
+ isolation_level=None,
+ deprecate_large_types=None,
+ legacy_schema_aliasing=False, **opts):
+ self.query_timeout = int(query_timeout or 0)
+ self.schema_name = schema_name
+
+ self.use_scope_identity = use_scope_identity
+ self.max_identifier_length = int(max_identifier_length or 0) or \
+ self.max_identifier_length
+ self.deprecate_large_types = deprecate_large_types
+ self.legacy_schema_aliasing = legacy_schema_aliasing
+
+ super(MSDialect, self).__init__(**opts)
+
+ self.isolation_level = isolation_level
+
+ def do_savepoint(self, connection, name):
+ # give the DBAPI a push
+ connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
+ super(MSDialect, self).do_savepoint(connection, name)
+
+ def do_release_savepoint(self, connection, name):
+ # SQL Server does not support RELEASE SAVEPOINT
+ pass
+
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ',
+ 'SNAPSHOT'])
+
+ def set_isolation_level(self, connection, level):
+ level = level.replace('_', ' ')
+ if level not in self._isolation_lookup:
+ raise exc.ArgumentError(
+ "Invalid value '%s' for isolation_level. "
+ "Valid isolation levels for %s are %s" %
+ (level, self.name, ", ".join(self._isolation_lookup))
+ )
+ cursor = connection.cursor()
+ cursor.execute(
+ "SET TRANSACTION ISOLATION LEVEL %s" % level)
+ cursor.close()
+
+ def get_isolation_level(self, connection):
+ if self.server_version_info < MS_2005_VERSION:
+ raise NotImplementedError(
+ "Can't fetch isolation level prior to SQL Server 2005")
+
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT CASE transaction_isolation_level
+ WHEN 0 THEN NULL
+ WHEN 1 THEN 'READ UNCOMMITTED'
+ WHEN 2 THEN 'READ COMMITTED'
+ WHEN 3 THEN 'REPEATABLE READ'
+ WHEN 4 THEN 'SERIALIZABLE'
+ WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
+ FROM sys.dm_exec_sessions
+ where session_id = @@SPID
+ """)
+ val = cursor.fetchone()[0]
+ cursor.close()
+ return val.upper()
+
+ def initialize(self, connection):
+ super(MSDialect, self).initialize(connection)
+ self._setup_version_attributes()
+
+ def on_connect(self):
+ if self.isolation_level is not None:
+ def connect(conn):
+ self.set_isolation_level(conn, self.isolation_level)
+ return connect
+ else:
+ return None
+
+ def _setup_version_attributes(self):
+ if self.server_version_info[0] not in list(range(8, 17)):
+ util.warn(
+ "Unrecognized server version info '%s'. Some SQL Server "
+ "features may not function properly." %
+ ".".join(str(x) for x in self.server_version_info))
+ if self.server_version_info >= MS_2005_VERSION and \
+ 'implicit_returning' not in self.__dict__:
+ self.implicit_returning = True
+ if self.server_version_info >= MS_2008_VERSION:
+ self.supports_multivalues_insert = True
+ if self.deprecate_large_types is None:
+ self.deprecate_large_types = \
+ self.server_version_info >= MS_2012_VERSION
+
+ def _get_default_schema_name(self, connection):
+ if self.server_version_info < MS_2005_VERSION:
+ return self.schema_name
+ else:
+ query = sql.text("SELECT schema_name()")
+ default_schema_name = connection.scalar(query)
+ if default_schema_name is not None:
+ return util.text_type(default_schema_name)
+ else:
+ return self.schema_name
+
+ @_db_plus_owner
+ def has_table(self, connection, tablename, dbname, owner, schema):
+ columns = ischema.columns
+
+ whereclause = columns.c.table_name == tablename
+
+ if owner:
+ whereclause = sql.and_(whereclause,
+ columns.c.table_schema == owner)
+ s = sql.select([columns], whereclause)
+ c = connection.execute(s)
+ return c.first() is not None
+
+ @reflection.cache
+ def get_schema_names(self, connection, **kw):
+ s = sql.select([ischema.schemata.c.schema_name],
+ order_by=[ischema.schemata.c.schema_name]
+ )
+ schema_names = [r[0] for r in connection.execute(s)]
+ return schema_names
+
+ @reflection.cache
+ @_db_plus_owner_listing
+ def get_table_names(self, connection, dbname, owner, schema, **kw):
+ tables = ischema.tables
+ s = sql.select([tables.c.table_name],
+ sql.and_(
+ tables.c.table_schema == owner,
+ tables.c.table_type == 'BASE TABLE'
+ ),
+ order_by=[tables.c.table_name]
+ )
+ table_names = [r[0] for r in connection.execute(s)]
+ return table_names
+
+ @reflection.cache
+ @_db_plus_owner_listing
+ def get_view_names(self, connection, dbname, owner, schema, **kw):
+ tables = ischema.tables
+ s = sql.select([tables.c.table_name],
+ sql.and_(
+ tables.c.table_schema == owner,
+ tables.c.table_type == 'VIEW'
+ ),
+ order_by=[tables.c.table_name]
+ )
+ view_names = [r[0] for r in connection.execute(s)]
+ return view_names
+
+ @reflection.cache
+ @_db_plus_owner
+ def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
+ # this uses the system catalogs, so index reflection is not
+ # supported below SQL Server 2005
+ if self.server_version_info < MS_2005_VERSION:
+ return []
+
+ rp = connection.execute(
+ sql.text("select ind.index_id, ind.is_unique, ind.name "
+ "from sys.indexes as ind join sys.tables as tab on "
+ "ind.object_id=tab.object_id "
+ "join sys.schemas as sch on sch.schema_id=tab.schema_id "
+ "where tab.name = :tabname "
+ "and sch.name=:schname "
+ "and ind.is_primary_key=0",
+ bindparams=[
+ sql.bindparam('tabname', tablename,
+ sqltypes.String(convert_unicode=True)),
+ sql.bindparam('schname', owner,
+ sqltypes.String(convert_unicode=True))
+ ],
+ typemap={
+ 'name': sqltypes.Unicode()
+ }
+ )
+ )
+ indexes = {}
+ for row in rp:
+ indexes[row['index_id']] = {
+ 'name': row['name'],
+ 'unique': row['is_unique'] == 1,
+ 'column_names': []
+ }
+ rp = connection.execute(
+ sql.text(
+ "select ind_col.index_id, ind_col.object_id, col.name "
+ "from sys.columns as col "
+ "join sys.tables as tab on tab.object_id=col.object_id "
+ "join sys.index_columns as ind_col on "
+ "(ind_col.column_id=col.column_id and "
+ "ind_col.object_id=tab.object_id) "
+ "join sys.schemas as sch on sch.schema_id=tab.schema_id "
+ "where tab.name=:tabname "
+ "and sch.name=:schname",
+ bindparams=[
+ sql.bindparam('tabname', tablename,
+ sqltypes.String(convert_unicode=True)),
+ sql.bindparam('schname', owner,
+ sqltypes.String(convert_unicode=True))
+ ],
+ typemap={'name': sqltypes.Unicode()}
+ ),
+ )
+ for row in rp:
+ if row['index_id'] in indexes:
+ indexes[row['index_id']]['column_names'].append(row['name'])
+
+ return list(indexes.values())
+
+ @reflection.cache
+ @_db_plus_owner
+ def get_view_definition(self, connection, viewname,
+ dbname, owner, schema, **kw):
+ rp = connection.execute(
+ sql.text(
+ "select definition from sys.sql_modules as mod, "
+ "sys.views as views, "
+ "sys.schemas as sch"
+ " where "
+ "mod.object_id=views.object_id and "
+ "views.schema_id=sch.schema_id and "
+ "views.name=:viewname and sch.name=:schname",
+ bindparams=[
+ sql.bindparam('viewname', viewname,
+ sqltypes.String(convert_unicode=True)),
+ sql.bindparam('schname', owner,
+ sqltypes.String(convert_unicode=True))
+ ]
+ )
+ )
+
+ if rp:
+ view_def = rp.scalar()
+ return view_def
+
+ @reflection.cache
+ @_db_plus_owner
+ def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
+ # Get base columns
+ columns = ischema.columns
+ if owner:
+ whereclause = sql.and_(columns.c.table_name == tablename,
+ columns.c.table_schema == owner)
+ else:
+ whereclause = columns.c.table_name == tablename
+ s = sql.select([columns], whereclause,
+ order_by=[columns.c.ordinal_position])
+
+ c = connection.execute(s)
+ cols = []
+ while True:
+ row = c.fetchone()
+ if row is None:
+ break
+ (name, type, nullable, charlen,
+ numericprec, numericscale, default, collation) = (
+ row[columns.c.column_name],
+ row[columns.c.data_type],
+ row[columns.c.is_nullable] == 'YES',
+ row[columns.c.character_maximum_length],
+ row[columns.c.numeric_precision],
+ row[columns.c.numeric_scale],
+ row[columns.c.column_default],
+ row[columns.c.collation_name]
+ )
+ coltype = self.ischema_names.get(type, None)
+
+ kwargs = {}
+ if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
+ MSNText, MSBinary, MSVarBinary,
+ sqltypes.LargeBinary):
+ if charlen == -1:
+ charlen = None
+ kwargs['length'] = charlen
+ if collation:
+ kwargs['collation'] = collation
+
+ if coltype is None:
+ util.warn(
+ "Did not recognize type '%s' of column '%s'" %
+ (type, name))
+ coltype = sqltypes.NULLTYPE
+ else:
+ if issubclass(coltype, sqltypes.Numeric) and \
+ coltype is not MSReal:
+ kwargs['scale'] = numericscale
+ kwargs['precision'] = numericprec
+
+ coltype = coltype(**kwargs)
+ cdict = {
+ 'name': name,
+ 'type': coltype,
+ 'nullable': nullable,
+ 'default': default,
+ 'autoincrement': False,
+ }
+ cols.append(cdict)
+ # autoincrement and identity
+ colmap = {}
+ for col in cols:
+ colmap[col['name']] = col
+ # We also run sp_columns to check for identity columns:
+ cursor = connection.execute("sp_columns @table_name = '%s', "
+ "@table_owner = '%s'"
+ % (tablename, owner))
+ ic = None
+ while True:
+ row = cursor.fetchone()
+ if row is None:
+ break
+ (col_name, type_name) = row[3], row[5]
+ if type_name.endswith("identity") and col_name in colmap:
+ ic = col_name
+ colmap[col_name]['autoincrement'] = True
+ colmap[col_name]['sequence'] = dict(
+ name='%s_identity' % col_name)
+ break
+ cursor.close()
+
+ if ic is not None and self.server_version_info >= MS_2005_VERSION:
+ table_fullname = "%s.%s" % (owner, tablename)
+ cursor = connection.execute(
+ "select ident_seed('%s'), ident_incr('%s')"
+ % (table_fullname, table_fullname)
+ )
+
+ row = cursor.first()
+ if row is not None and row[0] is not None:
+ colmap[ic]['sequence'].update({
+ 'start': int(row[0]),
+ 'increment': int(row[1])
+ })
+ return cols
+
+ @reflection.cache
+ @_db_plus_owner
+ def get_pk_constraint(self, connection, tablename,
+ dbname, owner, schema, **kw):
+ pkeys = []
+ TC = ischema.constraints
+ C = ischema.key_constraints.alias('C')
+
+ # Primary key constraints
+ s = sql.select([C.c.column_name,
+ TC.c.constraint_type,
+ C.c.constraint_name],
+ sql.and_(TC.c.constraint_name == C.c.constraint_name,
+ TC.c.table_schema == C.c.table_schema,
+ C.c.table_name == tablename,
+ C.c.table_schema == owner)
+ )
+ c = connection.execute(s)
+ constraint_name = None
+ for row in c:
+ if 'PRIMARY' in row[TC.c.constraint_type.name]:
+ pkeys.append(row[0])
+ if constraint_name is None:
+ constraint_name = row[C.c.constraint_name.name]
+ return {'constrained_columns': pkeys, 'name': constraint_name}
+
+ @reflection.cache
+ @_db_plus_owner
+ def get_foreign_keys(self, connection, tablename,
+ dbname, owner, schema, **kw):
+ RR = ischema.ref_constraints
+ C = ischema.key_constraints.alias('C')
+ R = ischema.key_constraints.alias('R')
+
+ # Foreign key constraints
+ s = sql.select([C.c.column_name,
+ R.c.table_schema, R.c.table_name, R.c.column_name,
+ RR.c.constraint_name, RR.c.match_option,
+ RR.c.update_rule,
+ RR.c.delete_rule],
+ sql.and_(C.c.table_name == tablename,
+ C.c.table_schema == owner,
+ C.c.constraint_name == RR.c.constraint_name,
+ R.c.constraint_name ==
+ RR.c.unique_constraint_name,
+ C.c.ordinal_position == R.c.ordinal_position
+ ),
+ order_by=[RR.c.constraint_name, R.c.ordinal_position]
+ )
+
+ # group rows by constraint ID, to handle multi-column FKs
+ fkeys = []
+ fknm, scols, rcols = (None, [], [])
+
+ def fkey_rec():
+ return {
+ 'name': None,
+ 'constrained_columns': [],
+ 'referred_schema': None,
+ 'referred_table': None,
+ 'referred_columns': []
+ }
+
+ fkeys = util.defaultdict(fkey_rec)
+
+ for r in connection.execute(s).fetchall():
+ scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
+
+ rec = fkeys[rfknm]
+ rec['name'] = rfknm
+ if not rec['referred_table']:
+ rec['referred_table'] = rtbl
+ if schema is not None or owner != rschema:
+ if dbname:
+ rschema = dbname + "." + rschema
+ rec['referred_schema'] = rschema
+
+ local_cols, remote_cols = \
+ rec['constrained_columns'],\
+ rec['referred_columns']
+
+ local_cols.append(scol)
+ remote_cols.append(rcol)
+
+ return list(fkeys.values())
diff --git a/app/lib/sqlalchemy/dialects/mssql/information_schema.py b/app/lib/sqlalchemy/dialects/mssql/information_schema.py
new file mode 100644
index 0000000..625479b
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/information_schema.py
@@ -0,0 +1,136 @@
+# mssql/information_schema.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+# TODO: should be using the sys. catalog with SQL Server, not information
+# schema
+
+from ... import Table, MetaData, Column
+from ...types import String, Unicode, UnicodeText, Integer, TypeDecorator
+from ... import cast
+from ... import util
+from ...sql import expression
+from ...ext.compiler import compiles
+
+ischema = MetaData()
+
+
+class CoerceUnicode(TypeDecorator):
+ impl = Unicode
+
+ def process_bind_param(self, value, dialect):
+ if util.py2k and isinstance(value, util.binary_type):
+ value = value.decode(dialect.encoding)
+ return value
+
+ def bind_expression(self, bindvalue):
+ return _cast_on_2005(bindvalue)
+
+
+class _cast_on_2005(expression.ColumnElement):
+ def __init__(self, bindvalue):
+ self.bindvalue = bindvalue
+
+
+@compiles(_cast_on_2005)
+def _compile(element, compiler, **kw):
+ from . import base
+ if compiler.dialect.server_version_info < base.MS_2005_VERSION:
+ return compiler.process(element.bindvalue, **kw)
+ else:
+ return compiler.process(cast(element.bindvalue, Unicode), **kw)
+
+schemata = Table("SCHEMATA", ischema,
+ Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
+ Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
+ Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
+ schema="INFORMATION_SCHEMA")
+
+tables = Table("TABLES", ischema,
+ Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column(
+ "TABLE_TYPE", String(convert_unicode=True),
+ key="table_type"),
+ schema="INFORMATION_SCHEMA")
+
+columns = Table("COLUMNS", ischema,
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
+ Column("IS_NULLABLE", Integer, key="is_nullable"),
+ Column("DATA_TYPE", String, key="data_type"),
+ Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
+ Column("CHARACTER_MAXIMUM_LENGTH", Integer,
+ key="character_maximum_length"),
+ Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
+ Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
+ Column("COLUMN_DEFAULT", Integer, key="column_default"),
+ Column("COLLATION_NAME", String, key="collation_name"),
+ schema="INFORMATION_SCHEMA")
+
+constraints = Table("TABLE_CONSTRAINTS", ischema,
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ Column("CONSTRAINT_TYPE", String(
+ convert_unicode=True), key="constraint_type"),
+ schema="INFORMATION_SCHEMA")
+
+column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema,
+ Column("TABLE_SCHEMA", CoerceUnicode,
+ key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode,
+ key="table_name"),
+ Column("COLUMN_NAME", CoerceUnicode,
+ key="column_name"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ schema="INFORMATION_SCHEMA")
+
+key_constraints = Table("KEY_COLUMN_USAGE", ischema,
+ Column("TABLE_SCHEMA", CoerceUnicode,
+ key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode,
+ key="table_name"),
+ Column("COLUMN_NAME", CoerceUnicode,
+ key="column_name"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ Column("ORDINAL_POSITION", Integer,
+ key="ordinal_position"),
+ schema="INFORMATION_SCHEMA")
+
+ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema,
+ Column("CONSTRAINT_CATALOG", CoerceUnicode,
+ key="constraint_catalog"),
+ Column("CONSTRAINT_SCHEMA", CoerceUnicode,
+ key="constraint_schema"),
+ Column("CONSTRAINT_NAME", CoerceUnicode,
+ key="constraint_name"),
+ # TODO: is CATLOG misspelled ?
+ Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode,
+ key="unique_constraint_catalog"),
+
+ Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode,
+ key="unique_constraint_schema"),
+ Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode,
+ key="unique_constraint_name"),
+ Column("MATCH_OPTION", String, key="match_option"),
+ Column("UPDATE_RULE", String, key="update_rule"),
+ Column("DELETE_RULE", String, key="delete_rule"),
+ schema="INFORMATION_SCHEMA")
+
+views = Table("VIEWS", ischema,
+ Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
+ Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
+ Column("TABLE_NAME", CoerceUnicode, key="table_name"),
+ Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
+ Column("CHECK_OPTION", String, key="check_option"),
+ Column("IS_UPDATABLE", String, key="is_updatable"),
+ schema="INFORMATION_SCHEMA")
diff --git a/app/lib/sqlalchemy/dialects/mssql/mxodbc.py b/app/lib/sqlalchemy/dialects/mssql/mxodbc.py
new file mode 100644
index 0000000..41729b7
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/mxodbc.py
@@ -0,0 +1,139 @@
+# mssql/mxodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: mssql+mxodbc
+ :name: mxODBC
+ :dbapi: mxodbc
+ :connectstring: mssql+mxodbc://<username>:<password>@<dsnname>
+ :url: http://www.egenix.com/
+
+Execution Modes
+---------------
+
+mxODBC features two styles of statement execution, using the
+``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
+an extension to the DBAPI specification). The former makes use of a particular
+API call specific to the SQL Server Native Client ODBC driver known as
+SQLDescribeParam, while the latter does not.
+
+mxODBC apparently only makes repeated use of a single prepared statement
+when SQLDescribeParam is used. The advantage to prepared statement reuse is
+one of performance. The disadvantage is that SQLDescribeParam has a limited
+set of scenarios in which bind parameters are understood, including that they
+cannot be placed within the argument lists of function calls, anywhere outside
+the FROM, or even within subqueries within the FROM clause - making the usage
+of bind parameters within SELECT statements impossible for all but the most
+simplistic statements.
+
+For this reason, the mxODBC dialect uses the "native" mode by default only for
+INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
+all other statements.
+
+This behavior can be controlled via
+:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
+``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
+value of ``True`` will unconditionally use native bind parameters and a value
+of ``False`` will unconditionally use string-escaped parameters.
+
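+For example, native execution could be requested on a per-statement basis
+(an illustrative sketch; ``my_table`` is a placeholder table)::
+
+ stmt = my_table.insert().execution_options(native_odbc_execute=True)
+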
+"""
+
+
+from ... import types as sqltypes
+from ...connectors.mxodbc import MxODBCConnector
+from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc
+from .base import (MSDialect,
+ MSSQLStrictCompiler,
+ VARBINARY,
+ _MSDateTime, _MSDate, _MSTime)
+
+
+class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
+ """Include pyodbc's numeric processor.
+ """
+
+
+class _MSDate_mxodbc(_MSDate):
+ def bind_processor(self, dialect):
+ def process(value):
+ if value is not None:
+ return "%s-%s-%s" % (value.year, value.month, value.day)
+ else:
+ return None
+ return process
+
+
+class _MSTime_mxodbc(_MSTime):
+ def bind_processor(self, dialect):
+ def process(value):
+ if value is not None:
+ return "%s:%s:%s" % (value.hour, value.minute, value.second)
+ else:
+ return None
+ return process
+
+
+class _VARBINARY_mxodbc(VARBINARY):
+
+ """
+ mxODBC Support for VARBINARY column types.
+
+ This handles the special case for null VARBINARY values,
+ which maps None values to the mx.ODBC.Manager.BinaryNull symbol.
+ """
+
+ def bind_processor(self, dialect):
+ if dialect.dbapi is None:
+ return None
+
+ DBAPIBinary = dialect.dbapi.Binary
+
+ def process(value):
+ if value is not None:
+ return DBAPIBinary(value)
+ else:
+ # should pull from mx.ODBC.Manager.BinaryNull
+ return dialect.dbapi.BinaryNull
+ return process
+
+
+class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
+ """
+ The pyodbc execution context is useful for enabling
+ SELECT SCOPE_IDENTITY in cases where OUTPUT clause
+ does not work (tables with insert triggers).
+ """
+ # todo - investigate whether the pyodbc execution context
+ # is really only being used in cases where OUTPUT
+ # won't work.
+
+
+class MSDialect_mxodbc(MxODBCConnector, MSDialect):
+
+ # this is only needed if "native ODBC" mode is used,
+ # which is now disabled by default.
+ # statement_compiler = MSSQLStrictCompiler
+
+ execution_ctx_cls = MSExecutionContext_mxodbc
+
+ # flag used by _MSNumeric_mxodbc
+ _need_decimal_fix = True
+
+ colspecs = {
+ sqltypes.Numeric: _MSNumeric_mxodbc,
+ sqltypes.DateTime: _MSDateTime,
+ sqltypes.Date: _MSDate_mxodbc,
+ sqltypes.Time: _MSTime_mxodbc,
+ VARBINARY: _VARBINARY_mxodbc,
+ sqltypes.LargeBinary: _VARBINARY_mxodbc,
+ }
+
+ def __init__(self, description_encoding=None, **params):
+ super(MSDialect_mxodbc, self).__init__(**params)
+ self.description_encoding = description_encoding
+
+dialect = MSDialect_mxodbc
diff --git a/app/lib/sqlalchemy/dialects/mssql/pymssql.py b/app/lib/sqlalchemy/dialects/mssql/pymssql.py
new file mode 100644
index 0000000..57ca8ab
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/pymssql.py
@@ -0,0 +1,97 @@
+# mssql/pymssql.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: mssql+pymssql
+ :name: pymssql
+ :dbapi: pymssql
+ :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?\
+charset=utf8
+ :url: http://pymssql.org/
+
+pymssql is a Python module that provides a Python DBAPI interface around
+`FreeTDS <http://www.freetds.org>`_. Compatible builds are available for
+Linux, MacOSX and Windows platforms.
+
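+A minimal connection sketch, using placeholder credentials, host, port and
+database name::
+
+ from sqlalchemy import create_engine
+
+ engine = create_engine(
+     "mssql+pymssql://scott:tiger@somehost:1433/somedb?charset=utf8")
+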
+"""
+from .base import MSDialect
+from ... import types as sqltypes, util, processors
+import re
+
+
+class _MSNumeric_pymssql(sqltypes.Numeric):
+ def result_processor(self, dialect, type_):
+ if not self.asdecimal:
+ return processors.to_float
+ else:
+ return sqltypes.Numeric.result_processor(self, dialect, type_)
+
+
+class MSDialect_pymssql(MSDialect):
+ supports_sane_rowcount = False
+ driver = 'pymssql'
+
+ colspecs = util.update_copy(
+ MSDialect.colspecs,
+ {
+ sqltypes.Numeric: _MSNumeric_pymssql,
+ sqltypes.Float: sqltypes.Float,
+ }
+ )
+
+ @classmethod
+ def dbapi(cls):
+ module = __import__('pymssql')
+ # pymssql < 2.1.1 doesn't have a Binary method; fall back to str
+ client_ver = tuple(int(x) for x in module.__version__.split("."))
+ if client_ver < (2, 1, 1):
+ # TODO: monkeypatching here is less than ideal
+ module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
+
+ if client_ver < (1, ):
+ util.warn("The pymssql dialect expects at least "
+ "the 1.0 series of the pymssql DBAPI.")
+ return module
+
+ def __init__(self, **params):
+ super(MSDialect_pymssql, self).__init__(**params)
+ self.use_scope_identity = True
+
+ def _get_server_version_info(self, connection):
+ vers = connection.scalar("select @@version")
+ m = re.match(
+ r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
+ if m:
+ return tuple(int(x) for x in m.group(1, 2, 3, 4))
+ else:
+ return None
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ opts.update(url.query)
+ port = opts.pop('port', None)
+ if port and 'host' in opts:
+ opts['host'] = "%s:%s" % (opts['host'], port)
+ return [[], opts]
+
+ def is_disconnect(self, e, connection, cursor):
+ for msg in (
+ "Adaptive Server connection timed out",
+ "Net-Lib error during Connection reset by peer",
+ "message 20003", # connection timeout
+ "Error 10054",
+ "Not connected to any MS SQL server",
+ "Connection is closed",
+ "message 20006", # Write to the server failed
+ "message 20017", # Unexpected EOF from the server
+ ):
+ if msg in str(e):
+ return True
+ else:
+ return False
+
+dialect = MSDialect_pymssql
diff --git a/app/lib/sqlalchemy/dialects/mssql/pyodbc.py b/app/lib/sqlalchemy/dialects/mssql/pyodbc.py
new file mode 100644
index 0000000..c6368f9
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/pyodbc.py
@@ -0,0 +1,292 @@
+# mssql/pyodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+.. dialect:: mssql+pyodbc
+ :name: PyODBC
+ :dbapi: pyodbc
+ :connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
+ :url: http://pypi.python.org/pypi/pyodbc/
+
+Connecting to PyODBC
+--------------------
+
+The URL here is to be translated to PyODBC connection strings, as
+detailed in `ConnectionStrings
+<http://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
+
+DSN Connections
+^^^^^^^^^^^^^^^
+
+A DSN-based connection is **preferred** overall when using ODBC. A
+basic DSN-based connection looks like::
+
+ engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
+
+The URL above will pass the following connection string to PyODBC::
+
+ dsn=mydsn;UID=user;PWD=pass
+
+If the username and password are omitted, the DSN form will also add
+the ``Trusted_Connection=yes`` directive to the ODBC string.
+
+Hostname Connections
+^^^^^^^^^^^^^^^^^^^^
+
+Hostname-based connections are **not preferred**, however they are supported.
+The ODBC driver name must be explicitly specified::
+
+ engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
+
+.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
+ SQL Server driver name specified explicitly. SQLAlchemy cannot
+ choose an optimal default here as it varies based on platform
+ and installed drivers.
+
+Other keywords interpreted by the Pyodbc dialect to be passed to
+``pyodbc.connect()`` in both the DSN and hostname cases include:
+``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
+
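+For example, to enable the DBAPI-level autocommit mode through the query
+string (an illustrative sketch)::
+
+ engine = create_engine(
+     "mssql+pyodbc://scott:tiger@some_dsn?autocommit=true")
+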
+Pass through exact Pyodbc string
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A PyODBC connection string can also be sent exactly as specified in
+`ConnectionStrings
+<http://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
+into the driver using the parameter ``odbc_connect``. The delimiters must be
+URL escaped, however, as illustrated below using ``urllib.quote_plus``::
+
+ import urllib
+ params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
+
+ engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
+
+
+Unicode Binds
+-------------
+
+The current state of PyODBC on a unix backend with FreeTDS and/or
+EasySoft is poor regarding unicode; different OS platforms and versions of
+UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
+dramatically alter how strings are received. The PyODBC dialect attempts to
+use all the information it knows to determine whether or not a Python unicode
+literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
+can encode these to bytestrings first, some users have reported that PyODBC
+mis-handles bytestrings for certain encodings and requires a Python unicode
+object, while the author has observed widespread cases where a Python unicode
+is completely misinterpreted by PyODBC, particularly when dealing with
+the information schema tables used in table reflection, and the value
+must first be encoded to a bytestring.
+
+It is for this reason that whether or not unicode literals for bound
+parameters are sent to PyODBC can be controlled using the
+``supports_unicode_binds`` parameter to ``create_engine()``. When
+left at its default of ``None``, the PyODBC dialect will use its
+best guess as to whether or not the driver deals with unicode literals
+well. When ``False``, unicode literals will be encoded first, and when
+``True`` unicode literals will be passed straight through. This is an interim
+flag that hopefully should not be needed when the unicode situation stabilizes
+for unix + PyODBC.
+
+.. versionadded:: 0.7.7
+ ``supports_unicode_binds`` parameter to ``create_engine()``\ .
+
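+As a sketch, string-escaped parameters can be forced using the DSN form
+shown earlier::
+
+ engine = create_engine(
+     "mssql+pyodbc://scott:tiger@some_dsn",
+     supports_unicode_binds=False)
+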
+Rowcount Support
+----------------
+
+Pyodbc only has partial support for rowcount. See the notes at
+:ref:`mssql_rowcount_versioning` for important notes when using ORM
+versioning.
+
+"""
+
+from .base import MSExecutionContext, MSDialect, VARBINARY
+from ...connectors.pyodbc import PyODBCConnector
+from ... import types as sqltypes, util, exc
+import decimal
+import re
+
+
+class _ms_numeric_pyodbc(object):
+
+ """Turns Decimals with adjusted() < 0 or > 7 into strings.
+
+ The routines here are needed for older pyodbc versions
+ as well as current mxODBC versions.
+
+ """
+
+ def bind_processor(self, dialect):
+
+ super_process = super(_ms_numeric_pyodbc, self).\
+ bind_processor(dialect)
+
+ if not dialect._need_decimal_fix:
+ return super_process
+
+ def process(value):
+ if self.asdecimal and \
+ isinstance(value, decimal.Decimal):
+
+ adjusted = value.adjusted()
+ if adjusted < 0:
+ return self._small_dec_to_string(value)
+ elif adjusted > 7:
+ return self._large_dec_to_string(value)
+
+ if super_process:
+ return super_process(value)
+ else:
+ return value
+ return process
+
+ # these routines needed for older versions of pyodbc.
+ # as of 2.1.8 this logic is integrated.
+
+ def _small_dec_to_string(self, value):
+ return "%s0.%s%s" % (
+ (value < 0 and '-' or ''),
+ '0' * (abs(value.adjusted()) - 1),
+ "".join([str(nint) for nint in value.as_tuple()[1]]))
+
+ def _large_dec_to_string(self, value):
+ _int = value.as_tuple()[1]
+ if 'E' in str(value):
+ result = "%s%s%s" % (
+ (value < 0 and '-' or ''),
+ "".join([str(s) for s in _int]),
+ "0" * (value.adjusted() - (len(_int) - 1)))
+ else:
+ if (len(_int) - 1) > value.adjusted():
+ result = "%s%s.%s" % (
+ (value < 0 and '-' or ''),
+ "".join(
+ [str(s) for s in _int][0:value.adjusted() + 1]),
+ "".join(
+ [str(s) for s in _int][value.adjusted() + 1:]))
+ else:
+ result = "%s%s" % (
+ (value < 0 and '-' or ''),
+ "".join(
+ [str(s) for s in _int][0:value.adjusted() + 1]))
+ return result
+
+
+class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
+ pass
+
+
+class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
+ pass
+
+
+class _VARBINARY_pyodbc(VARBINARY):
+ def bind_processor(self, dialect):
+ if dialect.dbapi is None:
+ return None
+
+ DBAPIBinary = dialect.dbapi.Binary
+
+ def process(value):
+ if value is not None:
+ return DBAPIBinary(value)
+ else:
+ # pyodbc-specific
+ return dialect.dbapi.BinaryNull
+ return process
+
+
+class MSExecutionContext_pyodbc(MSExecutionContext):
+ _embedded_scope_identity = False
+
+ def pre_exec(self):
+ """where appropriate, issue "select scope_identity()" in the same
+ statement.
+
+ Background on why "scope_identity()" is preferable to "@@identity":
+ http://msdn.microsoft.com/en-us/library/ms190315.aspx
+
+ Background on why we attempt to embed "scope_identity()" into the same
+ statement as the INSERT:
+ http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
+
+ """
+
+ super(MSExecutionContext_pyodbc, self).pre_exec()
+
+ # don't embed the scope_identity select into an
+ # "INSERT .. DEFAULT VALUES"
+ if self._select_lastrowid and \
+ self.dialect.use_scope_identity and \
+ len(self.parameters[0]):
+ self._embedded_scope_identity = True
+
+ self.statement += "; select scope_identity()"
+
+ def post_exec(self):
+ if self._embedded_scope_identity:
+ # Fetch the last inserted id from the manipulated statement
+ # We may have to skip over a number of result sets with
+ # no data (due to triggers, etc.)
+ while True:
+ try:
+ # fetchall() ensures the cursor is consumed
+ # without closing it (FreeTDS particularly)
+ row = self.cursor.fetchall()[0]
+ break
+ except self.dialect.dbapi.Error as e:
+ # no way around this - nextset() consumes the previous set
+ # so we need to just keep flipping
+ self.cursor.nextset()
+
+ self._lastrowid = int(row[0])
+ else:
+ super(MSExecutionContext_pyodbc, self).post_exec()
+
+
+class MSDialect_pyodbc(PyODBCConnector, MSDialect):
+
+ execution_ctx_cls = MSExecutionContext_pyodbc
+
+ colspecs = util.update_copy(
+ MSDialect.colspecs,
+ {
+ sqltypes.Numeric: _MSNumeric_pyodbc,
+ sqltypes.Float: _MSFloat_pyodbc,
+ VARBINARY: _VARBINARY_pyodbc,
+ sqltypes.LargeBinary: _VARBINARY_pyodbc,
+ }
+ )
+
+ def __init__(self, description_encoding=None, **params):
+ if 'description_encoding' in params:
+ self.description_encoding = params.pop('description_encoding')
+ super(MSDialect_pyodbc, self).__init__(**params)
+ self.use_scope_identity = self.use_scope_identity and \
+ self.dbapi and \
+ hasattr(self.dbapi.Cursor, 'nextset')
+ self._need_decimal_fix = self.dbapi and \
+ self._dbapi_version() < (2, 1, 8)
+
+ def _get_server_version_info(self, connection):
+ try:
+ raw = connection.scalar("SELECT SERVERPROPERTY('ProductVersion')")
+ except exc.DBAPIError:
+ # SQL Server docs indicate this function isn't present prior to
+ # 2008; additionally, unknown combinations of pyodbc aren't
+ # able to run this query.
+ return super(MSDialect_pyodbc, self).\
+ _get_server_version_info(connection)
+ else:
+ version = []
+ r = re.compile(r'[.\-]')
+ for n in r.split(raw):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
+
+dialect = MSDialect_pyodbc
diff --git a/app/lib/sqlalchemy/dialects/mssql/zxjdbc.py b/app/lib/sqlalchemy/dialects/mssql/zxjdbc.py
new file mode 100644
index 0000000..eaf5c96
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mssql/zxjdbc.py
@@ -0,0 +1,69 @@
+# mssql/zxjdbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: mssql+zxjdbc
+ :name: zxJDBC for Jython
+ :dbapi: zxjdbc
+ :connectstring: mssql+zxjdbc://user:pass@host:port/dbname\
+[?key=value&key=value...]
+ :driverurl: http://jtds.sourceforge.net/
+
+ .. note:: Jython is not supported by current versions of SQLAlchemy. The
+ zxjdbc dialect should be considered as experimental.
+
+"""
+from ...connectors.zxJDBC import ZxJDBCConnector
+from .base import MSDialect, MSExecutionContext
+from ... import engine
+
+
+class MSExecutionContext_zxjdbc(MSExecutionContext):
+
+ _embedded_scope_identity = False
+
+ def pre_exec(self):
+ super(MSExecutionContext_zxjdbc, self).pre_exec()
+ # scope_identity after the fact returns null in jTDS so we must
+ # embed it
+ if self._select_lastrowid and self.dialect.use_scope_identity:
+ self._embedded_scope_identity = True
+ self.statement += "; SELECT scope_identity()"
+
+ def post_exec(self):
+ if self._embedded_scope_identity:
+ while True:
+ try:
+ row = self.cursor.fetchall()[0]
+ break
+ except self.dialect.dbapi.Error:
+ self.cursor.nextset()
+ self._lastrowid = int(row[0])
+
+ if (self.isinsert or self.isupdate or self.isdelete) and \
+ self.compiled.returning:
+ self._result_proxy = engine.FullyBufferedResultProxy(self)
+
+ if self._enable_identity_insert:
+ table = self.dialect.identifier_preparer.format_table(
+ self.compiled.statement.table)
+ self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
+
+
+class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
+ jdbc_db_name = 'jtds:sqlserver'
+ jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
+
+ execution_ctx_cls = MSExecutionContext_zxjdbc
+
+ def _get_server_version_info(self, connection):
+ return tuple(
+ int(x)
+ for x in connection.connection.dbversion.split('.')
+ )
+
+dialect = MSDialect_zxjdbc
diff --git a/app/lib/sqlalchemy/dialects/mysql/__init__.py b/app/lib/sqlalchemy/dialects/mysql/__init__.py
new file mode 100644
index 0000000..2ff8542
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/__init__.py
@@ -0,0 +1,31 @@
+# mysql/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from . import base, mysqldb, oursql, \
+ pyodbc, zxjdbc, mysqlconnector, pymysql,\
+ gaerdbms, cymysql
+
+# default dialect
+base.dialect = mysqldb.dialect
+
+from .base import \
+ BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
+ DECIMAL, DOUBLE, ENUM, \
+ FLOAT, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
+ MEDIUMINT, MEDIUMTEXT, NCHAR, \
+ NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
+ TINYBLOB, TINYINT, TINYTEXT,\
+ VARBINARY, VARCHAR, YEAR, dialect
+
+__all__ = (
+ 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
+ 'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
+ 'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
+ 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
+ 'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
+ 'YEAR', 'dialect'
+)
diff --git a/app/lib/sqlalchemy/dialects/mysql/base.py b/app/lib/sqlalchemy/dialects/mysql/base.py
new file mode 100644
index 0000000..822e932
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/base.py
@@ -0,0 +1,2056 @@
+# mysql/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+
+.. dialect:: mysql
+ :name: MySQL
+
+Supported Versions and Features
+-------------------------------
+
+SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
+However, no heroic measures are taken to work around major missing
+SQL features - if your server version does not support sub-selects, for
+example, they won't work in SQLAlchemy either.
+
+See the official MySQL documentation for detailed information about features
+supported in any given server release.
+
+.. _mysql_connection_timeouts:
+
+Connection Timeouts
+-------------------
+
+MySQL features an automatic connection close behavior, for connections that
+have been idle for eight hours or more. To avoid this issue, use
+the ``pool_recycle`` option which controls the maximum age of any connection::
+
+ engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
+
+.. seealso::
+
+ :ref:`pool_setting_recycle` - full description of the pool recycle feature.
+
+
+.. _mysql_storage_engines:
+
+CREATE TABLE arguments including Storage Engines
+------------------------------------------------
+
+MySQL's CREATE TABLE syntax includes a wide array of special options,
+including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
+``INSERT_METHOD``, and many more.
+To accommodate the rendering of these arguments, specify the form
+``mysql_argument_name="value"``. For example, to specify a table with
+``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE``
+of ``1024``::
+
+ Table('mytable', metadata,
+ Column('data', String(32)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ mysql_key_block_size="1024"
+ )
+
+The MySQL dialect will normally transfer any keyword specified as
+``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
+``CREATE TABLE`` statement. A handful of these names will render with a space
+instead of an underscore; to support this, the MySQL dialect has awareness of
+these particular names, which include ``DATA DIRECTORY``
+(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
+``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
+``mysql_index_directory``).
+
+The most common argument is ``mysql_engine``, which refers to the storage
+engine for the table. Historically, MySQL server installations would default
+to ``MyISAM`` for this value, although newer versions may be defaulting
+to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
+of transactions and foreign keys.
+
+A :class:`.Table` that is created in a MySQL database with a storage engine
+of ``MyISAM`` will be essentially non-transactional, meaning any
+INSERT/UPDATE/DELETE statement referring to this table will be invoked as
+autocommit. It also will have no support for foreign key constraints; while
+the ``CREATE TABLE`` statement accepts foreign key options, when using the
+``MyISAM`` storage engine these arguments are discarded. Reflecting such a
+table will also produce no foreign key constraint information.
+
+For fully atomic transactions as well as support for foreign key
+constraints, all participating ``CREATE TABLE`` statements must specify a
+transactional engine, which in the vast majority of cases is ``InnoDB``.
+
+.. seealso::
+
+ `The InnoDB Storage Engine
+ <http://dev.mysql.com/doc/refman/5.7/en/innodb-storage-engine.html>`_ -
+ on the MySQL website.
+
+Case Sensitivity and Table Reflection
+-------------------------------------
+
+MySQL has inconsistent support for case-sensitive identifier
+names, basing support on specific details of the underlying
+operating system. However, it has been observed that no matter
+what case sensitivity behavior is present, the names of tables in
+foreign key declarations are *always* received from the database
+as all-lower case, making it impossible to accurately reflect a
+schema where inter-related tables use mixed-case identifier names.
+
+Therefore it is strongly advised that table names be declared as
+all lower case both within SQLAlchemy as well as on the MySQL
+database itself, especially if database reflection features are
+to be used.
+
+.. _mysql_isolation_level:
+
+Transaction Isolation Level
+---------------------------
+
+All MySQL dialects support setting of transaction isolation level
+both via a dialect-specific parameter :paramref:`.create_engine.isolation_level`
+accepted by :func:`.create_engine`,
+as well as the :paramref:`.Connection.execution_options.isolation_level`
+argument as passed to :meth:`.Connection.execution_options`.
+This feature works by issuing the command
+``SET SESSION TRANSACTION ISOLATION LEVEL `` for
+each new connection. For the special AUTOCOMMIT isolation level, DBAPI-specific
+techniques are used.
+
+To set isolation level using :func:`.create_engine`::
+
+ engine = create_engine(
+ "mysql://scott:tiger@localhost/test",
+ isolation_level="READ UNCOMMITTED"
+ )
+
+To set using per-connection execution options::
+
+ connection = engine.connect()
+ connection = connection.execution_options(
+ isolation_level="READ COMMITTED"
+ )
+
+Valid values for ``isolation_level`` include:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT``
+
+The special ``AUTOCOMMIT`` value makes use of the various "autocommit"
+attributes provided by specific DBAPIs, and is currently supported by
+MySQLdb, MySQL-Client, MySQL-Connector Python, and PyMySQL. Using it,
+the MySQL connection will return true for the value of
+``SELECT @@autocommit;``.
+
+.. versionadded:: 1.1 - added support for the AUTOCOMMIT isolation level.
+
+AUTO_INCREMENT Behavior
+-----------------------
+
+When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
+the first :class:`.Integer` primary key column which is not marked as a
+foreign key::
+
+ >>> t = Table('mytable', metadata,
+ ... Column('mytable_id', Integer, primary_key=True)
+ ... )
+ >>> t.create()
+ CREATE TABLE mytable (
+ mytable_id INTEGER NOT NULL AUTO_INCREMENT,
+ PRIMARY KEY (mytable_id)
+ )
+
+You can disable this behavior by passing ``False`` to the
+:paramref:`~.Column.autoincrement` argument of :class:`.Column`. This flag
+can also be used to enable auto-increment on a secondary column in a
+multi-column key for some storage engines::
+
+ Table('mytable', metadata,
+ Column('gid', Integer, primary_key=True, autoincrement=False),
+ Column('id', Integer, primary_key=True)
+ )
+
+.. _mysql_ss_cursors:
+
+Server Side Cursors
+-------------------
+
+Server-side cursor support is available for the MySQLdb and PyMySQL dialects.
+From a MySQL point of view this means that the ``MySQLdb.cursors.SSCursor`` or
+``pymysql.cursors.SSCursor`` class is used when building up the cursor which
+will receive results. The most typical way of invoking this feature is via the
+:paramref:`.Connection.execution_options.stream_results` connection execution
+option. Server side cursors can also be enabled for all SELECT statements
+unconditionally by passing ``server_side_cursors=True`` to
+:func:`.create_engine`.
+
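+For example, to stream rows incrementally for a single statement (a
+minimal sketch; ``stmt`` is a placeholder statement)::
+
+ with engine.connect() as conn:
+     result = conn.execution_options(stream_results=True).execute(stmt)
+     for row in result:
+         print(row)
+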
+.. versionadded:: 1.1.4 - added server-side cursor support.
+
+.. _mysql_unicode:
+
+Unicode
+-------
+
+Charset Selection
+~~~~~~~~~~~~~~~~~
+
+Most MySQL DBAPIs offer the option to set the client character set for
+a connection. This is typically delivered using the ``charset`` parameter
+in the URL, such as::
+
+ e = create_engine("mysql+pymysql://scott:tiger@localhost/\
+test?charset=utf8")
+
+This charset is the **client character set** for the connection. Some
+MySQL DBAPIs will default this to a value such as ``latin1``, and some
+will make use of the ``default-character-set`` setting in the ``my.cnf``
+file as well. Documentation for the DBAPI in use should be consulted
+for specific behavior.
+
+The encoding used for Unicode has traditionally been ``'utf8'``. However,
+for MySQL versions 5.5.3 on forward, a new MySQL-specific encoding
+``'utf8mb4'`` has been introduced. The rationale for this new encoding
+is that MySQL's utf-8 encoding only supports
+codepoints up to three bytes instead of four. Therefore,
+when communicating with a MySQL database
+that includes codepoints more than three bytes in size,
+this new charset is preferred, if supported by both the database as well
+as the client DBAPI, as in::
+
+ e = create_engine("mysql+pymysql://scott:tiger@localhost/\
+test?charset=utf8mb4")
+
+At the moment, up-to-date versions of MySQLdb and PyMySQL support the
+``utf8mb4`` charset. Other DBAPIs such as MySQL-Connector and OurSQL
+may **not** support it as of yet.
+
+In order to use ``utf8mb4`` encoding, changes to
+the MySQL schema and/or server configuration may be required.
+
+.. seealso::
+
+ `The utf8mb4 Character Set \
+<http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ - \
+in the MySQL documentation
+
+Unicode Encoding / Decoding
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All modern MySQL DBAPIs offer the service of handling the encoding and
+decoding of unicode data between the Python application space and the database.
+As this was not always the case, SQLAlchemy also includes a comprehensive system
+for performing the encode/decode task as well. As only one of these systems
+should be in use at a time, SQLAlchemy has long included functionality
+to automatically detect upon first connection whether or not the DBAPI is
+automatically handling unicode.
+
+Whether or not the MySQL DBAPI will handle encoding can usually be configured
+using a DBAPI flag ``use_unicode``, which is known to be supported at least
+by MySQLdb, PyMySQL, and MySQL-Connector. Setting this value to ``0``
+in the "connect args" or query string will have the effect of disabling the
+DBAPI's handling of unicode, such that it instead will return data of the
+``str`` type or ``bytes`` type, with data in the configured charset::
+
+ # connect while disabling the DBAPI's unicode encoding/decoding
+ e = create_engine("mysql+mysqldb://scott:tiger@localhost/test?charset=utf8&use_unicode=0")
+
+Current recommendations for modern DBAPIs are as follows:
+
+* It is generally always safe to leave the ``use_unicode`` flag set at
+ its default; that is, don't use it at all.
+* Under Python 3, the ``use_unicode=0`` flag should **never be used**.
+ SQLAlchemy under Python 3 generally assumes the DBAPI receives and returns
+ string values as Python 3 strings, which are inherently unicode objects.
+* Under Python 2 with MySQLdb, the ``use_unicode=0`` flag will **offer
+ superior performance**, as MySQLdb's unicode converters under Python 2
+ have been observed to have unusually slow performance compared to
+ SQLAlchemy's fast C-based encoders/decoders.
+
+In short: don't specify ``use_unicode`` *at all*, with the possible
+exception of ``use_unicode=0`` on MySQLdb with Python 2 **only** for a
+potential performance gain.
+
+Ansi Quoting Style
+------------------
+
+MySQL features two varieties of identifier "quoting style", one using
+backticks and the other using quotes, e.g. ```some_identifier``` vs.
+``"some_identifier"``. All MySQL dialects detect which version
+is in use by checking the value of ``sql_mode`` when a connection is first
+established with a particular :class:`.Engine`. This quoting style comes
+into play when rendering table and column names as well as when reflecting
+existing database structures. The detection is entirely automatic and
+no special configuration is needed to use either quoting style.
+
+.. versionchanged:: 0.6 detection of ANSI quoting style is entirely automatic,
+ there's no longer any end-user ``create_engine()`` options in this regard.
+
+MySQL SQL Extensions
+--------------------
+
+Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
+function and operator support::
+
+ table.select(table.c.password==func.md5('plaintext'))
+ table.select(table.c.username.op('regexp')('^[a-d]'))
+
+And of course any valid MySQL statement can be executed as a string as well.
+
+Some limited direct support for MySQL extensions to SQL is currently
+available.
+
+* SELECT pragma::
+
+ select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
+
+* UPDATE with LIMIT::
+
+ update(..., mysql_limit=10)
+
+rowcount Support
+----------------
+
+SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
+usual definition of "number of rows matched by an UPDATE or DELETE" statement.
+This is in contradiction to the default setting on most MySQL DBAPI drivers,
+which is "number of rows actually modified/deleted". For this reason, the
+SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
+flag, or whatever is equivalent for the target dialect, upon connection.
+This setting is currently hardcoded.
+
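+For example, after an UPDATE the matched-row count is available on the
+result (a minimal sketch; ``my_table`` is a placeholder)::
+
+ result = conn.execute(
+     my_table.update().
+     where(my_table.c.id == 5).
+     values(data='new value'))
+ matched = result.rowcount  # rows matched, not necessarily modified
+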
+.. seealso::
+
+ :attr:`.ResultProxy.rowcount`
+
+
+CAST Support
+------------
+
+MySQL documents the CAST operator as available in version 4.0.2. When using
+the SQLAlchemy :func:`.cast` function, SQLAlchemy
+will not render the CAST token on MySQL before this version, based on server
+version detection, instead rendering the internal expression directly.
+
+CAST may still not be desirable on an early MySQL version post-4.0.2, as it
+didn't add all datatype support until 4.1.1. If your application falls into
+this narrow area, the behavior of CAST can be controlled using the
+:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::
+
+ from sqlalchemy.sql.expression import Cast
+ from sqlalchemy.ext.compiler import compiles
+
+ @compiles(Cast, 'mysql')
+ def _check_mysql_version(element, compiler, **kw):
+ if compiler.dialect.server_version_info < (4, 1, 0):
+ return compiler.process(element.clause, **kw)
+ else:
+ return compiler.visit_cast(element, **kw)
+
+The above function, which only needs to be declared once
+within an application, overrides the compilation of the
+:func:`.cast` construct to check for version 4.1.0 before
+fully rendering CAST; else the internal element of the
+construct is rendered directly.
+
+
+.. _mysql_indexes:
+
+MySQL Specific Index Options
+----------------------------
+
+MySQL-specific extensions to the :class:`.Index` construct are available.
+
+Index Length
+~~~~~~~~~~~~~
+
+MySQL provides an option to create index entries with a certain length, where
+"length" refers to the number of characters or bytes in each value which will
+become part of the index. SQLAlchemy provides this feature via the
+``mysql_length`` parameter::
+
+ Index('my_index', my_table.c.data, mysql_length=10)
+
+ Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
+ 'b': 9})
+
+Prefix lengths are given in characters for nonbinary string types and in bytes
+for binary string types. The value passed to the keyword argument *must* be
+either an integer (and, thus, specify the same prefix length value for all
+columns of the index) or a dict in which keys are column names and values are
+prefix length values for corresponding columns. MySQL only allows a length for
+a column of an index if it is for a CHAR, VARCHAR, TEXT, BINARY, VARBINARY or
+BLOB.
+
+.. versionadded:: 0.8.2 ``mysql_length`` may now be specified as a dictionary
+ for use with composite indexes.
+
+Index Prefixes
+~~~~~~~~~~~~~~
+
+MySQL storage engines permit you to specify an index prefix when creating
+an index. SQLAlchemy provides this feature via the
+``mysql_prefix`` parameter on :class:`.Index`::
+
+ Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
+
+The value passed to the keyword argument will be simply passed through to the
+underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
+storage engine.
+
+.. versionadded:: 1.1.5
+
+.. seealso::
+
+ `CREATE INDEX
+ <http://dev.mysql.com/doc/refman/5.0/en/create-index.html>`_ -
+ MySQL documentation
+
+Index Types
+~~~~~~~~~~~~~
+
+Some MySQL storage engines permit you to specify an index type when creating
+an index or primary key constraint. SQLAlchemy provides this feature via the
+``mysql_using`` parameter on :class:`.Index`::
+
+ Index('my_index', my_table.c.data, mysql_using='hash')
+
+As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
+
+ PrimaryKeyConstraint("data", mysql_using='hash')
+
+The value passed to the keyword argument will be simply passed through to the
+underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
+type for your MySQL storage engine.
+
+More information can be found at:
+
+http://dev.mysql.com/doc/refman/5.0/en/create-index.html
+
+http://dev.mysql.com/doc/refman/5.0/en/create-table.html
+
+.. _mysql_foreign_keys:
+
+MySQL Foreign Keys
+------------------
+
+MySQL's behavior regarding foreign keys has some important caveats.
+
+Foreign Key Arguments to Avoid
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
+or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
+:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of
+these keywords being rendered in a DDL expression, which will then raise an
+error on MySQL. In order to use these keywords on a foreign key while having
+them ignored on a MySQL backend, use a custom compile rule::
+
+ from sqlalchemy.ext.compiler import compiles
+ from sqlalchemy.schema import ForeignKeyConstraint
+
+ @compiles(ForeignKeyConstraint, "mysql")
+ def process(element, compiler, **kw):
+ element.deferrable = element.initially = None
+ return compiler.visit_foreign_key_constraint(element, **kw)
+
+.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
+ the ``deferrable`` or ``initially`` keyword arguments of
+ :class:`.ForeignKeyConstraint` and :class:`.ForeignKey`.
+
+The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
+by SQLAlchemy in conjunction with the MySQL backend. This argument is
+silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
+DELETE options also being ignored by the backend. Therefore MATCH should
+never be used with the MySQL backend; as is the case with DEFERRABLE and
+INITIALLY, custom compilation rules can be used to correct a MySQL
+ForeignKeyConstraint at DDL definition time.
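+
+For example, a variant of the recipe shown earlier which additionally
+clears out ``match`` (a sketch; it would be used in place of, not in
+addition to, the rule above)::
+
+    from sqlalchemy.ext.compiler import compiles
+    from sqlalchemy.schema import ForeignKeyConstraint
+
+    @compiles(ForeignKeyConstraint, "mysql")
+    def process(element, compiler, **kw):
+        # neutralize all three unsupported options before rendering
+        element.deferrable = element.initially = element.match = None
+        return compiler.visit_foreign_key_constraint(element, **kw)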
+
+.. versionadded:: 0.9.0 - the MySQL backend will raise a
+ :class:`.CompileError` when the ``match`` keyword is used with
+ :class:`.ForeignKeyConstraint` or :class:`.ForeignKey`.
+
+Reflection of Foreign Key Constraints
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Not all MySQL storage engines support foreign keys. When using the
+very common ``MyISAM`` MySQL storage engine, the information loaded by table
+reflection will not include foreign keys. For these tables, you may supply a
+:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
+
+ Table('mytable', metadata,
+ ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
+ autoload=True
+ )
+
+.. seealso::
+
+ :ref:`mysql_storage_engines`
+
+.. _mysql_unique_constraints:
+
+MySQL Unique Constraints and Reflection
+---------------------------------------
+
+SQLAlchemy supports both the :class:`.Index` construct with the
+flag ``unique=True``, indicating a UNIQUE index, as well as the
+:class:`.UniqueConstraint` construct, representing a UNIQUE constraint.
+Both objects/syntaxes are supported by MySQL when emitting DDL to create
+these constraints. However, MySQL does not have a unique constraint
+construct that is separate from a unique index; that is, the "UNIQUE"
+constraint on MySQL is equivalent to creating a "UNIQUE INDEX".
+
+When reflecting these constructs, the :meth:`.Inspector.get_indexes`
+and the :meth:`.Inspector.get_unique_constraints` methods will **both**
+return an entry for a UNIQUE index in MySQL. However, when performing
+full table reflection using ``Table(..., autoload=True)``,
+the :class:`.UniqueConstraint` construct is
+**not** part of the fully reflected :class:`.Table` construct under any
+circumstances; this construct is always represented by a :class:`.Index`
+with the ``unique=True`` setting present in the :attr:`.Table.indexes`
+collection.
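+
+A minimal sketch of the difference, assuming a table ``mytable`` with a
+UNIQUE index already present on the MySQL server::
+
+    from sqlalchemy import create_engine, inspect, MetaData, Table
+
+    engine = create_engine("mysql://scott:tiger@localhost/test")
+    insp = inspect(engine)
+
+    # both inspector methods report an entry for the UNIQUE index
+    insp.get_indexes("mytable")
+    insp.get_unique_constraints("mytable")
+
+    # full reflection represents it only as an Index with unique=True
+    t = Table("mytable", MetaData(), autoload=True, autoload_with=engine)
+    unique_indexes = [ix for ix in t.indexes if ix.unique]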
+
+
+.. _mysql_timestamp_null:
+
+TIMESTAMP Columns and NULL
+--------------------------
+
+MySQL historically enforces that a column which specifies the
+TIMESTAMP datatype implicitly includes a default value of
+CURRENT_TIMESTAMP, even though this is not stated, and additionally
+sets the column as NOT NULL, which is the opposite of the behavior
+of all other datatypes::
+
+ mysql> CREATE TABLE ts_test (
+ -> a INTEGER,
+ -> b INTEGER NOT NULL,
+ -> c TIMESTAMP,
+ -> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ -> e TIMESTAMP NULL);
+ Query OK, 0 rows affected (0.03 sec)
+
+ mysql> SHOW CREATE TABLE ts_test;
+ +---------+-----------------------------------------------------
+ | Table | Create Table
+ +---------+-----------------------------------------------------
+ | ts_test | CREATE TABLE `ts_test` (
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) NOT NULL,
+ `c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `e` timestamp NULL DEFAULT NULL
+ ) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+Above, we see that an INTEGER column defaults to NULL, unless it is specified
+with NOT NULL. But when the column is of type TIMESTAMP, an implicit
+default of CURRENT_TIMESTAMP is generated which also coerces the column
+to be a NOT NULL, even though we did not specify it as such.
+
+This behavior of MySQL can be changed on the MySQL side using the
+`explicit_defaults_for_timestamp
+<http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html
+#sysvar_explicit_defaults_for_timestamp>`_ configuration flag introduced in
+MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like
+any other datatype on the MySQL side with regards to defaults and nullability.
+
+However, to accommodate the vast majority of MySQL databases that do not
+specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with
+any TIMESTAMP column that does not specify ``nullable=False``. In order
+to accommodate newer databases that specify ``explicit_defaults_for_timestamp``,
+SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
+``nullable=False``. The following example illustrates::
+
+ from sqlalchemy import MetaData, Integer, Table, Column, text
+ from sqlalchemy.dialects.mysql import TIMESTAMP
+
+ m = MetaData()
+ t = Table('ts_test', m,
+ Column('a', Integer),
+ Column('b', Integer, nullable=False),
+ Column('c', TIMESTAMP),
+ Column('d', TIMESTAMP, nullable=False)
+ )
+
+
+ from sqlalchemy import create_engine
+ e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
+ m.create_all(e)
+
+output::
+
+ CREATE TABLE ts_test (
+ a INTEGER,
+ b INTEGER NOT NULL,
+ c TIMESTAMP NULL,
+ d TIMESTAMP NOT NULL
+ )
+
+.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all
+ cases for TIMESTAMP columns, to accommodate
+ ``explicit_defaults_for_timestamp``. Prior to this version, it will
+ not render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``.
+
+"""
+
+import re
+import sys
+import json
+
+from ... import schema as sa_schema
+from ... import exc, log, sql, util
+from ...sql import compiler, elements
+from array import array as _array
+
+from ...engine import reflection
+from ...engine import default
+from ... import types as sqltypes
+from ...util import topological
+from ...types import DATE, BOOLEAN, \
+ BLOB, BINARY, VARBINARY
+
+from . import reflection as _reflection
+from .types import BIGINT, BIT, CHAR, DECIMAL, DATETIME, \
+ DOUBLE, FLOAT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, \
+ MEDIUMTEXT, NCHAR, NUMERIC, NVARCHAR, REAL, SMALLINT, TEXT, TIME, \
+ TIMESTAMP, TINYBLOB, TINYINT, TINYTEXT, VARCHAR, YEAR
+from .types import _StringType, _IntegerType, _NumericType, \
+ _FloatType, _MatchType
+from .enumerated import ENUM, SET
+from .json import JSON, JSONIndexType, JSONPathType
+
+
+RESERVED_WORDS = set(
+ ['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
+ 'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
+ 'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
+ 'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
+ 'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
+ 'current_user', 'cursor', 'database', 'databases', 'day_hour',
+ 'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
+ 'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
+ 'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
+ 'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
+ 'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
+ 'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group',
+ 'having', 'high_priority', 'hour_microsecond', 'hour_minute',
+ 'hour_second', 'if', 'ignore', 'in', 'index', 'infile', 'inner', 'inout',
+ 'insensitive', 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8',
+ 'integer', 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys',
+ 'kill', 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines',
+ 'load', 'localtime', 'localtimestamp', 'lock', 'long', 'longblob',
+ 'longtext', 'loop', 'low_priority', 'master_ssl_verify_server_cert',
+ 'match', 'mediumblob', 'mediumint', 'mediumtext', 'middleint',
+ 'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
+ 'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
+ 'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
+ 'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
+ 'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
+ 'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
+ 'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
+ 'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
+ 'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
+ 'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
+ 'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
+ 'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
+ 'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
+ 'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
+ 'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',
+
+ 'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0
+
+ 'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1
+
+ 'accessible', 'linear', 'master_ssl_verify_server_cert', 'range',
+ 'read_only', 'read_write', # 5.1
+
+ 'general', 'ignore_server_ids', 'master_heartbeat_period', 'maxvalue',
+ 'resignal', 'signal', 'slow', # 5.5
+
+ 'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot',
+ 'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6
+
+ 'generated', 'optimizer_costs', 'stored', 'virtual', # 5.7
+
+ 'admin', 'except', 'grouping', 'of', 'persist', 'recursive',
+ 'role', # 8.0
+
+ ])
+
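+
+# patterns for classifying textual statements: data-modifying statements
+# that should trigger autocommit, and SET [GLOBAL|SESSION] statements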
+AUTOCOMMIT_RE = re.compile(
+ r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
+ re.I | re.UNICODE)
+SET_RE = re.compile(
+ r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
+ re.I | re.UNICODE)
+
+
+# old names
+MSTime = TIME
+MSSet = SET
+MSEnum = ENUM
+MSLongBlob = LONGBLOB
+MSMediumBlob = MEDIUMBLOB
+MSTinyBlob = TINYBLOB
+MSBlob = BLOB
+MSBinary = BINARY
+MSVarBinary = VARBINARY
+MSNChar = NCHAR
+MSNVarChar = NVARCHAR
+MSChar = CHAR
+MSString = VARCHAR
+MSLongText = LONGTEXT
+MSMediumText = MEDIUMTEXT
+MSTinyText = TINYTEXT
+MSText = TEXT
+MSYear = YEAR
+MSTimeStamp = TIMESTAMP
+MSBit = BIT
+MSSmallInteger = SMALLINT
+MSTinyInteger = TINYINT
+MSMediumInteger = MEDIUMINT
+MSBigInteger = BIGINT
+MSNumeric = NUMERIC
+MSDecimal = DECIMAL
+MSDouble = DOUBLE
+MSReal = REAL
+MSFloat = FLOAT
+MSInteger = INTEGER
+
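+# adapt generic SQLAlchemy types to their MySQL-specific counterparts
+# at expression compilation time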
+colspecs = {
+ _IntegerType: _IntegerType,
+ _NumericType: _NumericType,
+ _FloatType: _FloatType,
+ sqltypes.Numeric: NUMERIC,
+ sqltypes.Float: FLOAT,
+ sqltypes.Time: TIME,
+ sqltypes.Enum: ENUM,
+ sqltypes.MatchType: _MatchType,
+ sqltypes.JSON: JSON,
+ sqltypes.JSON.JSONIndexType: JSONIndexType,
+ sqltypes.JSON.JSONPathType: JSONPathType
+
+}
+
+# Everything 3.23 through 5.1 excepting OpenGIS types.
+ischema_names = {
+ 'bigint': BIGINT,
+ 'binary': BINARY,
+ 'bit': BIT,
+ 'blob': BLOB,
+ 'boolean': BOOLEAN,
+ 'char': CHAR,
+ 'date': DATE,
+ 'datetime': DATETIME,
+ 'decimal': DECIMAL,
+ 'double': DOUBLE,
+ 'enum': ENUM,
+ 'fixed': DECIMAL,
+ 'float': FLOAT,
+ 'int': INTEGER,
+ 'integer': INTEGER,
+ 'json': JSON,
+ 'longblob': LONGBLOB,
+ 'longtext': LONGTEXT,
+ 'mediumblob': MEDIUMBLOB,
+ 'mediumint': MEDIUMINT,
+ 'mediumtext': MEDIUMTEXT,
+ 'nchar': NCHAR,
+ 'nvarchar': NVARCHAR,
+ 'numeric': NUMERIC,
+ 'set': SET,
+ 'smallint': SMALLINT,
+ 'text': TEXT,
+ 'time': TIME,
+ 'timestamp': TIMESTAMP,
+ 'tinyblob': TINYBLOB,
+ 'tinyint': TINYINT,
+ 'tinytext': TINYTEXT,
+ 'varbinary': VARBINARY,
+ 'varchar': VARCHAR,
+ 'year': YEAR,
+}
+
+
+class MySQLExecutionContext(default.DefaultExecutionContext):
+
+ def should_autocommit_text(self, statement):
+ return AUTOCOMMIT_RE.match(statement)
+
+ def create_server_side_cursor(self):
+ if self.dialect.supports_server_side_cursors:
+ return self._dbapi_connection.cursor(self.dialect._sscursor)
+ else:
+ raise NotImplementedError()
+
+
+class MySQLCompiler(compiler.SQLCompiler):
+
+ render_table_with_column_in_update_from = True
+ """Overridden from base SQLCompiler value"""
+
+ extract_map = compiler.SQLCompiler.extract_map.copy()
+ extract_map.update({'milliseconds': 'millisecond'})
+
+ def visit_random_func(self, fn, **kw):
+ return "rand%s" % self.function_argspec(fn)
+
+ def visit_utc_timestamp_func(self, fn, **kw):
+ return "UTC_TIMESTAMP"
+
+ def visit_sysdate_func(self, fn, **kw):
+ return "SYSDATE()"
+
+ def visit_json_getitem_op_binary(self, binary, operator, **kw):
+ return "JSON_EXTRACT(%s, %s)" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
+
+ def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+ return "JSON_EXTRACT(%s, %s)" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
+
+ def visit_concat_op_binary(self, binary, operator, **kw):
+ return "concat(%s, %s)" % (self.process(binary.left),
+ self.process(binary.right))
+
+ def visit_match_op_binary(self, binary, operator, **kw):
+ return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % \
+ (self.process(binary.left), self.process(binary.right))
+
+ def get_from_hint_text(self, table, text):
+ return text
+
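+    # Render the type portion of a CAST expression; returning None signals
+    # to visit_cast() below that the datatype cannot be CAST on MySQL.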
+ def visit_typeclause(self, typeclause, type_=None):
+ if type_ is None:
+ type_ = typeclause.type.dialect_impl(self.dialect)
+ if isinstance(type_, sqltypes.TypeDecorator):
+ return self.visit_typeclause(typeclause, type_.impl)
+ elif isinstance(type_, sqltypes.Integer):
+ if getattr(type_, 'unsigned', False):
+ return 'UNSIGNED INTEGER'
+ else:
+ return 'SIGNED INTEGER'
+ elif isinstance(type_, sqltypes.TIMESTAMP):
+ return 'DATETIME'
+ elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime,
+ sqltypes.Date, sqltypes.Time)):
+ return self.dialect.type_compiler.process(type_)
+ elif isinstance(type_, sqltypes.String) \
+ and not isinstance(type_, (ENUM, SET)):
+ adapted = CHAR._adapt_string_for_cast(type_)
+ return self.dialect.type_compiler.process(adapted)
+ elif isinstance(type_, sqltypes._Binary):
+ return 'BINARY'
+ elif isinstance(type_, sqltypes.JSON):
+ return "JSON"
+ elif isinstance(type_, sqltypes.NUMERIC):
+ return self.dialect.type_compiler.process(
+ type_).replace('NUMERIC', 'DECIMAL')
+ else:
+ return None
+
+ def visit_cast(self, cast, **kw):
+ # No cast until 4, no decimals until 5.
+ if not self.dialect._supports_cast:
+ util.warn(
+ "Current MySQL version does not support "
+ "CAST; the CAST will be skipped.")
+ return self.process(cast.clause.self_group(), **kw)
+
+ type_ = self.process(cast.typeclause)
+ if type_ is None:
+ util.warn(
+ "Datatype %s does not support CAST on MySQL; "
+ "the CAST will be skipped." %
+ self.dialect.type_compiler.process(cast.typeclause.type))
+ return self.process(cast.clause.self_group(), **kw)
+
+ return 'CAST(%s AS %s)' % (self.process(cast.clause, **kw), type_)
+
+ def render_literal_value(self, value, type_):
+ value = super(MySQLCompiler, self).render_literal_value(value, type_)
+ if self.dialect._backslash_escapes:
+ value = value.replace('\\', '\\\\')
+ return value
+
+ # override native_boolean=False behavior here, as
+ # MySQL still supports native boolean
+ def visit_true(self, element, **kw):
+ return "true"
+
+ def visit_false(self, element, **kw):
+ return "false"
+
+ def get_select_precolumns(self, select, **kw):
+ """Add special MySQL keywords in place of DISTINCT.
+
+ .. note::
+
+ this usage is deprecated. :meth:`.Select.prefix_with`
+ should be used for special keywords at the start
+ of a SELECT.
+
+ """
+ if isinstance(select._distinct, util.string_types):
+ return select._distinct.upper() + " "
+ elif select._distinct:
+ return "DISTINCT "
+ else:
+ return ""
+
+ def visit_join(self, join, asfrom=False, **kwargs):
+ if join.full:
+ join_type = " FULL OUTER JOIN "
+ elif join.isouter:
+ join_type = " LEFT OUTER JOIN "
+ else:
+ join_type = " INNER JOIN "
+
+ return ''.join(
+ (self.process(join.left, asfrom=True, **kwargs),
+ join_type,
+ self.process(join.right, asfrom=True, **kwargs),
+ " ON ",
+ self.process(join.onclause, **kwargs)))
+
+ def for_update_clause(self, select, **kw):
+ if select._for_update_arg.read:
+ return " LOCK IN SHARE MODE"
+ else:
+ return " FOR UPDATE"
+
+ def limit_clause(self, select, **kw):
+        # MySQL supports:
+        #   LIMIT <limit>
+        #   LIMIT <offset>, <limit>
+        # and in server versions > 3.3:
+        #   LIMIT <limit> OFFSET <offset>
+ # The latter is more readable for offsets but we're stuck with the
+ # former until we can refine dialects by server revision.
+
+ limit_clause, offset_clause = select._limit_clause, \
+ select._offset_clause
+
+ if limit_clause is None and offset_clause is None:
+ return ''
+ elif offset_clause is not None:
+ # As suggested by the MySQL docs, need to apply an
+ # artificial limit if one wasn't provided
+ # http://dev.mysql.com/doc/refman/5.0/en/select.html
+ if limit_clause is None:
+ # hardwire the upper limit. Currently
+ # needed by OurSQL with Python 3
+ # (https://bugs.launchpad.net/oursql/+bug/686232),
+ # but also is consistent with the usage of the upper
+ # bound as part of MySQL's "syntax" for OFFSET with
+ # no LIMIT
+ return ' \n LIMIT %s, %s' % (
+ self.process(offset_clause, **kw),
+ "18446744073709551615")
+ else:
+ return ' \n LIMIT %s, %s' % (
+ self.process(offset_clause, **kw),
+ self.process(limit_clause, **kw))
+ else:
+ # No offset provided, so just use the limit
+ return ' \n LIMIT %s' % (self.process(limit_clause, **kw),)
+
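+    # MySQL supports LIMIT on UPDATE statements; this is exposed via the
+    # mysql_limit keyword argument (see construct_arguments on the dialect)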
+ def update_limit_clause(self, update_stmt):
+ limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None)
+ if limit:
+ return "LIMIT %s" % limit
+ else:
+ return None
+
+ def update_tables_clause(self, update_stmt, from_table,
+ extra_froms, **kw):
+ return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
+ for t in [from_table] + list(extra_froms))
+
+ def update_from_clause(self, update_stmt, from_table,
+ extra_froms, from_hints, **kw):
+ return None
+
+
+class MySQLDDLCompiler(compiler.DDLCompiler):
+ def get_column_specification(self, column, **kw):
+ """Builds column DDL."""
+
+ colspec = [
+ self.preparer.format_column(column),
+ self.dialect.type_compiler.process(
+ column.type, type_expression=column)
+ ]
+
+ is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP)
+
+ if not column.nullable:
+ colspec.append('NOT NULL')
+
+ # see: http://docs.sqlalchemy.org/en/latest/dialects/
+ # mysql.html#mysql_timestamp_null
+ elif column.nullable and is_timestamp:
+ colspec.append('NULL')
+
+ default = self.get_column_default_string(column)
+ if default is not None:
+ colspec.append('DEFAULT ' + default)
+
+ if column.table is not None \
+ and column is column.table._autoincrement_column and \
+ column.server_default is None:
+ colspec.append('AUTO_INCREMENT')
+
+ return ' '.join(colspec)
+
+ def post_create_table(self, table):
+ """Build table-level CREATE options like ENGINE and COLLATE."""
+
+ table_opts = []
+
+ opts = dict(
+ (
+ k[len(self.dialect.name) + 1:].upper(),
+ v
+ )
+ for k, v in table.kwargs.items()
+ if k.startswith('%s_' % self.dialect.name)
+ )
+
+ for opt in topological.sort([
+ ('DEFAULT_CHARSET', 'COLLATE'),
+ ('DEFAULT_CHARACTER_SET', 'COLLATE'),
+ ('PARTITION_BY', 'PARTITIONS'), # only for test consistency
+ ], opts):
+ arg = opts[opt]
+ if opt in _reflection._options_of_type_string:
+ arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
+
+ if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
+ 'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
+ 'DEFAULT_CHARSET',
+ 'DEFAULT_COLLATE', 'PARTITION_BY'):
+ opt = opt.replace('_', ' ')
+
+ joiner = '='
+ if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
+ 'CHARACTER SET', 'COLLATE',
+ 'PARTITION BY', 'PARTITIONS'):
+ joiner = ' '
+
+ table_opts.append(joiner.join((opt, arg)))
+ return ' '.join(table_opts)
+
+ def visit_create_index(self, create):
+ index = create.element
+ self._verify_index_table(index)
+ preparer = self.preparer
+ table = preparer.format_table(index.table)
+ columns = [self.sql_compiler.process(expr, include_table=False,
+ literal_binds=True)
+ for expr in index.expressions]
+
+ name = self._prepared_index_name(index)
+
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+
+ index_prefix = index.kwargs.get('mysql_prefix', None)
+ if index_prefix:
+ text += index_prefix + ' '
+
+ text += "INDEX %s ON %s " % (name, table)
+
+ length = index.dialect_options['mysql']['length']
+ if length is not None:
+
+ if isinstance(length, dict):
+ # length value can be a (column_name --> integer value)
+ # mapping specifying the prefix length for each column of the
+ # index
+ columns = ', '.join(
+ '%s(%d)' % (expr, length[col.name]) if col.name in length
+ else
+ (
+ '%s(%d)' % (expr, length[expr]) if expr in length
+ else '%s' % expr
+ )
+ for col, expr in zip(index.expressions, columns)
+ )
+ else:
+ # or can be an integer value specifying the same
+ # prefix length for all columns of the index
+ columns = ', '.join(
+ '%s(%d)' % (col, length)
+ for col in columns
+ )
+ else:
+ columns = ', '.join(columns)
+ text += '(%s)' % columns
+
+ using = index.dialect_options['mysql']['using']
+ if using is not None:
+ text += " USING %s" % (preparer.quote(using))
+
+ return text
+
+ def visit_primary_key_constraint(self, constraint):
+ text = super(MySQLDDLCompiler, self).\
+ visit_primary_key_constraint(constraint)
+ using = constraint.dialect_options['mysql']['using']
+ if using:
+ text += " USING %s" % (self.preparer.quote(using))
+ return text
+
+ def visit_drop_index(self, drop):
+ index = drop.element
+
+ return "\nDROP INDEX %s ON %s" % (
+ self._prepared_index_name(index,
+ include_schema=False),
+ self.preparer.format_table(index.table))
+
+ def visit_drop_constraint(self, drop):
+ constraint = drop.element
+ if isinstance(constraint, sa_schema.ForeignKeyConstraint):
+ qual = "FOREIGN KEY "
+ const = self.preparer.format_constraint(constraint)
+ elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
+ qual = "PRIMARY KEY "
+ const = ""
+ elif isinstance(constraint, sa_schema.UniqueConstraint):
+ qual = "INDEX "
+ const = self.preparer.format_constraint(constraint)
+ else:
+ qual = ""
+ const = self.preparer.format_constraint(constraint)
+ return "ALTER TABLE %s DROP %s%s" % \
+ (self.preparer.format_table(constraint.table),
+ qual, const)
+
+ def define_constraint_match(self, constraint):
+ if constraint.match is not None:
+ raise exc.CompileError(
+ "MySQL ignores the 'MATCH' keyword while at the same time "
+ "causes ON UPDATE/ON DELETE clauses to be ignored.")
+ return ""
+
+
+class MySQLTypeCompiler(compiler.GenericTypeCompiler):
+ def _extend_numeric(self, type_, spec):
+ "Extend a numeric-type declaration with MySQL specific extensions."
+
+ if not self._mysql_type(type_):
+ return spec
+
+ if type_.unsigned:
+ spec += ' UNSIGNED'
+ if type_.zerofill:
+ spec += ' ZEROFILL'
+ return spec
+
+ def _extend_string(self, type_, defaults, spec):
+ """Extend a string-type declaration with standard SQL CHARACTER SET /
+ COLLATE annotations and MySQL specific extensions.
+
+ """
+
+ def attr(name):
+ return getattr(type_, name, defaults.get(name))
+
+ if attr('charset'):
+ charset = 'CHARACTER SET %s' % attr('charset')
+ elif attr('ascii'):
+ charset = 'ASCII'
+ elif attr('unicode'):
+ charset = 'UNICODE'
+ else:
+ charset = None
+
+ if attr('collation'):
+ collation = 'COLLATE %s' % type_.collation
+ elif attr('binary'):
+ collation = 'BINARY'
+ else:
+ collation = None
+
+ if attr('national'):
+ # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
+ return ' '.join([c for c in ('NATIONAL', spec, collation)
+ if c is not None])
+ return ' '.join([c for c in (spec, charset, collation)
+ if c is not None])
+
+ def _mysql_type(self, type_):
+ return isinstance(type_, (_StringType, _NumericType))
+
+ def visit_NUMERIC(self, type_, **kw):
+ if type_.precision is None:
+ return self._extend_numeric(type_, "NUMERIC")
+ elif type_.scale is None:
+ return self._extend_numeric(type_,
+ "NUMERIC(%(precision)s)" %
+ {'precision': type_.precision})
+ else:
+ return self._extend_numeric(type_,
+ "NUMERIC(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
+
+ def visit_DECIMAL(self, type_, **kw):
+ if type_.precision is None:
+ return self._extend_numeric(type_, "DECIMAL")
+ elif type_.scale is None:
+ return self._extend_numeric(type_,
+ "DECIMAL(%(precision)s)" %
+ {'precision': type_.precision})
+ else:
+ return self._extend_numeric(type_,
+ "DECIMAL(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
+
+ def visit_DOUBLE(self, type_, **kw):
+ if type_.precision is not None and type_.scale is not None:
+ return self._extend_numeric(type_,
+ "DOUBLE(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
+ else:
+ return self._extend_numeric(type_, 'DOUBLE')
+
+ def visit_REAL(self, type_, **kw):
+ if type_.precision is not None and type_.scale is not None:
+ return self._extend_numeric(type_,
+ "REAL(%(precision)s, %(scale)s)" %
+ {'precision': type_.precision,
+ 'scale': type_.scale})
+ else:
+ return self._extend_numeric(type_, 'REAL')
+
+ def visit_FLOAT(self, type_, **kw):
+ if self._mysql_type(type_) and \
+ type_.scale is not None and \
+ type_.precision is not None:
+ return self._extend_numeric(
+ type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale))
+ elif type_.precision is not None:
+ return self._extend_numeric(type_,
+ "FLOAT(%s)" % (type_.precision,))
+ else:
+ return self._extend_numeric(type_, "FLOAT")
+
+ def visit_INTEGER(self, type_, **kw):
+ if self._mysql_type(type_) and type_.display_width is not None:
+ return self._extend_numeric(
+ type_, "INTEGER(%(display_width)s)" %
+ {'display_width': type_.display_width})
+ else:
+ return self._extend_numeric(type_, "INTEGER")
+
+ def visit_BIGINT(self, type_, **kw):
+ if self._mysql_type(type_) and type_.display_width is not None:
+ return self._extend_numeric(
+ type_, "BIGINT(%(display_width)s)" %
+ {'display_width': type_.display_width})
+ else:
+ return self._extend_numeric(type_, "BIGINT")
+
+ def visit_MEDIUMINT(self, type_, **kw):
+ if self._mysql_type(type_) and type_.display_width is not None:
+ return self._extend_numeric(
+ type_, "MEDIUMINT(%(display_width)s)" %
+ {'display_width': type_.display_width})
+ else:
+ return self._extend_numeric(type_, "MEDIUMINT")
+
+ def visit_TINYINT(self, type_, **kw):
+ if self._mysql_type(type_) and type_.display_width is not None:
+ return self._extend_numeric(type_,
+ "TINYINT(%s)" % type_.display_width)
+ else:
+ return self._extend_numeric(type_, "TINYINT")
+
+ def visit_SMALLINT(self, type_, **kw):
+ if self._mysql_type(type_) and type_.display_width is not None:
+ return self._extend_numeric(type_,
+ "SMALLINT(%(display_width)s)" %
+ {'display_width': type_.display_width}
+ )
+ else:
+ return self._extend_numeric(type_, "SMALLINT")
+
+ def visit_BIT(self, type_, **kw):
+ if type_.length is not None:
+ return "BIT(%s)" % type_.length
+ else:
+ return "BIT"
+
+ def visit_DATETIME(self, type_, **kw):
+ if getattr(type_, 'fsp', None):
+ return "DATETIME(%d)" % type_.fsp
+ else:
+ return "DATETIME"
+
+ def visit_DATE(self, type_, **kw):
+ return "DATE"
+
+ def visit_TIME(self, type_, **kw):
+ if getattr(type_, 'fsp', None):
+ return "TIME(%d)" % type_.fsp
+ else:
+ return "TIME"
+
+ def visit_TIMESTAMP(self, type_, **kw):
+ if getattr(type_, 'fsp', None):
+ return "TIMESTAMP(%d)" % type_.fsp
+ else:
+ return "TIMESTAMP"
+
+ def visit_YEAR(self, type_, **kw):
+ if type_.display_width is None:
+ return "YEAR"
+ else:
+ return "YEAR(%s)" % type_.display_width
+
+ def visit_TEXT(self, type_, **kw):
+ if type_.length:
+ return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
+ else:
+ return self._extend_string(type_, {}, "TEXT")
+
+ def visit_TINYTEXT(self, type_, **kw):
+ return self._extend_string(type_, {}, "TINYTEXT")
+
+ def visit_MEDIUMTEXT(self, type_, **kw):
+ return self._extend_string(type_, {}, "MEDIUMTEXT")
+
+ def visit_LONGTEXT(self, type_, **kw):
+ return self._extend_string(type_, {}, "LONGTEXT")
+
+ def visit_VARCHAR(self, type_, **kw):
+ if type_.length:
+ return self._extend_string(
+ type_, {}, "VARCHAR(%d)" % type_.length)
+ else:
+ raise exc.CompileError(
+ "VARCHAR requires a length on dialect %s" %
+ self.dialect.name)
+
+ def visit_CHAR(self, type_, **kw):
+ if type_.length:
+ return self._extend_string(type_, {}, "CHAR(%(length)s)" %
+ {'length': type_.length})
+ else:
+ return self._extend_string(type_, {}, "CHAR")
+
+ def visit_NVARCHAR(self, type_, **kw):
+ # We'll actually generate the equiv. "NATIONAL VARCHAR" instead
+ # of "NVARCHAR".
+ if type_.length:
+ return self._extend_string(
+ type_, {'national': True},
+ "VARCHAR(%(length)s)" % {'length': type_.length})
+ else:
+ raise exc.CompileError(
+ "NVARCHAR requires a length on dialect %s" %
+ self.dialect.name)
+
+ def visit_NCHAR(self, type_, **kw):
+ # We'll actually generate the equiv.
+ # "NATIONAL CHAR" instead of "NCHAR".
+ if type_.length:
+ return self._extend_string(
+ type_, {'national': True},
+ "CHAR(%(length)s)" % {'length': type_.length})
+ else:
+ return self._extend_string(type_, {'national': True}, "CHAR")
+
+ def visit_VARBINARY(self, type_, **kw):
+ return "VARBINARY(%d)" % type_.length
+
+ def visit_JSON(self, type_, **kw):
+ return "JSON"
+
+ def visit_large_binary(self, type_, **kw):
+ return self.visit_BLOB(type_)
+
+ def visit_enum(self, type_, **kw):
+ if not type_.native_enum:
+ return super(MySQLTypeCompiler, self).visit_enum(type_)
+ else:
+ return self._visit_enumerated_values("ENUM", type_, type_.enums)
+
+ def visit_BLOB(self, type_, **kw):
+ if type_.length:
+ return "BLOB(%d)" % type_.length
+ else:
+ return "BLOB"
+
+ def visit_TINYBLOB(self, type_, **kw):
+ return "TINYBLOB"
+
+ def visit_MEDIUMBLOB(self, type_, **kw):
+ return "MEDIUMBLOB"
+
+ def visit_LONGBLOB(self, type_, **kw):
+ return "LONGBLOB"
+
+ def _visit_enumerated_values(self, name, type_, enumerated_values):
+ quoted_enums = []
+ for e in enumerated_values:
+ quoted_enums.append("'%s'" % e.replace("'", "''"))
+ return self._extend_string(type_, {}, "%s(%s)" % (
+ name, ",".join(quoted_enums))
+ )
+
+ def visit_ENUM(self, type_, **kw):
+ return self._visit_enumerated_values("ENUM", type_,
+ type_._enumerated_values)
+
+ def visit_SET(self, type_, **kw):
+ return self._visit_enumerated_values("SET", type_,
+ type_._enumerated_values)
+
+    def visit_BOOLEAN(self, type_, **kw):
+ return "BOOL"
+
+
+class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
+
+ reserved_words = RESERVED_WORDS
+
+ def __init__(self, dialect, server_ansiquotes=False, **kw):
+ if not server_ansiquotes:
+ quote = "`"
+ else:
+ quote = '"'
+
+ super(MySQLIdentifierPreparer, self).__init__(
+ dialect,
+ initial_quote=quote,
+ escape_quote=quote)
+
+ def _quote_free_identifiers(self, *ids):
+ """Unilaterally identifier-quote any number of strings."""
+
+ return tuple([self.quote_identifier(i) for i in ids if i is not None])
+
+
+@log.class_logger
+class MySQLDialect(default.DefaultDialect):
+ """Details of the MySQL dialect.
+ Not used directly in application code.
+ """
+
+ name = 'mysql'
+ supports_alter = True
+
+ # MySQL has no true "boolean" type; we
+ # allow for the "true" and "false" keywords, however
+ supports_native_boolean = False
+
+ # identifiers are 64, however aliases can be 255...
+ max_identifier_length = 255
+ max_index_name_length = 64
+
+ supports_native_enum = True
+
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = False
+ supports_multivalues_insert = True
+
+ default_paramstyle = 'format'
+ colspecs = colspecs
+
+ statement_compiler = MySQLCompiler
+ ddl_compiler = MySQLDDLCompiler
+ type_compiler = MySQLTypeCompiler
+ ischema_names = ischema_names
+ preparer = MySQLIdentifierPreparer
+
+ # default SQL compilation settings -
+ # these are modified upon initialize(),
+ # i.e. first connect
+ _backslash_escapes = True
+ _server_ansiquotes = False
+
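+    # dialect-specific keyword arguments accepted by core constructs,
+    # e.g. mysql_limit on update(), and mysql_using / mysql_length /
+    # mysql_prefix on Index, as documented in the module docstring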
+ construct_arguments = [
+ (sa_schema.Table, {
+ "*": None
+ }),
+ (sql.Update, {
+ "limit": None
+ }),
+ (sa_schema.PrimaryKeyConstraint, {
+ "using": None
+ }),
+ (sa_schema.Index, {
+ "using": None,
+ "length": None,
+ "prefix": None,
+ })
+ ]
+
+ def __init__(self, isolation_level=None, json_serializer=None,
+ json_deserializer=None, **kwargs):
+ kwargs.pop('use_ansiquotes', None) # legacy
+ default.DefaultDialect.__init__(self, **kwargs)
+ self.isolation_level = isolation_level
+ self._json_serializer = json_serializer
+ self._json_deserializer = json_deserializer
+
+ def on_connect(self):
+ if self.isolation_level is not None:
+ def connect(conn):
+ self.set_isolation_level(conn, self.isolation_level)
+ return connect
+ else:
+ return None
+
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ'])
+
+ def set_isolation_level(self, connection, level):
+ level = level.replace('_', ' ')
+
+ # adjust for ConnectionFairy being present
+ # allows attribute set e.g. "connection.autocommit = True"
+ # to work properly
+ if hasattr(connection, 'connection'):
+ connection = connection.connection
+
+ self._set_isolation_level(connection, level)
+
+ def _set_isolation_level(self, connection, level):
+ if level not in self._isolation_lookup:
+ raise exc.ArgumentError(
+ "Invalid value '%s' for isolation_level. "
+ "Valid isolation levels for %s are %s" %
+ (level, self.name, ", ".join(self._isolation_lookup))
+ )
+ cursor = connection.cursor()
+ cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
+ cursor.execute("COMMIT")
+ cursor.close()
+
+ def get_isolation_level(self, connection):
+ cursor = connection.cursor()
+ cursor.execute('SELECT @@tx_isolation')
+ val = cursor.fetchone()[0]
+ cursor.close()
+ if util.py3k and isinstance(val, bytes):
+ val = val.decode()
+ return val.upper().replace("-", " ")
+
+ def do_commit(self, dbapi_connection):
+ """Execute a COMMIT."""
+
+ # COMMIT/ROLLBACK were introduced in 3.23.15.
+ # Yes, we have at least one user who has to talk to these old
+ # versions!
+ #
+ # Ignore commit/rollback if support isn't present, otherwise even
+ # basic operations via autocommit fail.
+ try:
+ dbapi_connection.commit()
+ except Exception:
+ if self.server_version_info < (3, 23, 15):
+ args = sys.exc_info()[1].args
+ if args and args[0] == 1064:
+ return
+ raise
+
+ def do_rollback(self, dbapi_connection):
+ """Execute a ROLLBACK."""
+
+ try:
+ dbapi_connection.rollback()
+ except Exception:
+ if self.server_version_info < (3, 23, 15):
+ args = sys.exc_info()[1].args
+ if args and args[0] == 1064:
+ return
+ raise
+
+ def do_begin_twophase(self, connection, xid):
+ connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
+
+ def do_prepare_twophase(self, connection, xid):
+ connection.execute(sql.text("XA END :xid"), xid=xid)
+ connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
+
+ def do_rollback_twophase(self, connection, xid, is_prepared=True,
+ recover=False):
+ if not is_prepared:
+ connection.execute(sql.text("XA END :xid"), xid=xid)
+ connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
+
+ def do_commit_twophase(self, connection, xid, is_prepared=True,
+ recover=False):
+ if not is_prepared:
+ self.do_prepare_twophase(connection, xid)
+ connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
+
+ def do_recover_twophase(self, connection):
+ resultset = connection.execute("XA RECOVER")
+ return [row['data'][0:row['gtrid_length']] for row in resultset]
+
+ def is_disconnect(self, e, connection, cursor):
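+        # client error codes such as 2006 "MySQL server has gone away" and
+        # 2013 "Lost connection to MySQL server during query" indicate a
+        # dead connection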
+ if isinstance(e, (self.dbapi.OperationalError,
+ self.dbapi.ProgrammingError)):
+ return self._extract_error_code(e) in \
+ (2006, 2013, 2014, 2045, 2055)
+ elif isinstance(e, self.dbapi.InterfaceError):
+ # if underlying connection is closed,
+ # this is the error you get
+ return "(0, '')" in str(e)
+ else:
+ return False
+
+ def _compat_fetchall(self, rp, charset=None):
+ """Proxy result rows to smooth over MySQL-Python driver
+ inconsistencies."""
+
+ return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
+
+ def _compat_fetchone(self, rp, charset=None):
+ """Proxy a result row to smooth over MySQL-Python driver
+ inconsistencies."""
+
+ return _DecodingRowProxy(rp.fetchone(), charset)
+
+ def _compat_first(self, rp, charset=None):
+ """Proxy a result row to smooth over MySQL-Python driver
+ inconsistencies."""
+
+ return _DecodingRowProxy(rp.first(), charset)
+
+ def _extract_error_code(self, exception):
+ raise NotImplementedError()
+
+ def _get_default_schema_name(self, connection):
+ return connection.execute('SELECT DATABASE()').scalar()
+
+ def has_table(self, connection, table_name, schema=None):
+ # SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
+ # on macosx (and maybe win?) with multibyte table names.
+ #
+ # TODO: if this is not a problem on win, make the strategy swappable
+ # based on platform. DESCRIBE is slower.
+
+ # [ticket:726]
+ # full_name = self.identifier_preparer.format_table(table,
+ # use_schema=True)
+
+ full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
+ schema, table_name))
+
+ st = "DESCRIBE %s" % full_name
+ rs = None
+ try:
+ try:
+ rs = connection.execution_options(
+ skip_user_error_events=True).execute(st)
+ have = rs.fetchone() is not None
+ rs.close()
+ return have
+ except exc.DBAPIError as e:
+ if self._extract_error_code(e.orig) == 1146:
+ return False
+ raise
+ finally:
+ if rs:
+ rs.close()
+
+ def initialize(self, connection):
+ self._connection_charset = self._detect_charset(connection)
+ self._detect_ansiquotes(connection)
+ if self._server_ansiquotes:
+ # if ansiquotes == True, build a new IdentifierPreparer
+ # with the new setting
+ self.identifier_preparer = self.preparer(
+ self, server_ansiquotes=self._server_ansiquotes)
+
+ default.DefaultDialect.initialize(self, connection)
+
+ @property
+ def _is_mariadb(self):
+ return 'MariaDB' in self.server_version_info
+
+ @property
+ def _supports_cast(self):
+ return self.server_version_info is None or \
+ self.server_version_info >= (4, 0, 2)
+
+ @reflection.cache
+ def get_schema_names(self, connection, **kw):
+ rp = connection.execute("SHOW schemas")
+ return [r[0] for r in rp]
+
+ @reflection.cache
+ def get_table_names(self, connection, schema=None, **kw):
+ """Return a Unicode SHOW TABLES from a given schema."""
+ if schema is not None:
+ current_schema = schema
+ else:
+ current_schema = self.default_schema_name
+
+ charset = self._connection_charset
+ if self.server_version_info < (5, 0, 2):
+ rp = connection.execute(
+ "SHOW TABLES FROM %s" %
+ self.identifier_preparer.quote_identifier(current_schema))
+ return [row[0] for
+ row in self._compat_fetchall(rp, charset=charset)]
+ else:
+ rp = connection.execute(
+ "SHOW FULL TABLES FROM %s" %
+ self.identifier_preparer.quote_identifier(current_schema))
+
+ return [row[0]
+ for row in self._compat_fetchall(rp, charset=charset)
+ if row[1] == 'BASE TABLE']
+
+ @reflection.cache
+ def get_view_names(self, connection, schema=None, **kw):
+        if self.server_version_info < (5, 0, 2):
+            raise NotImplementedError
+        if schema is None:
+            schema = self.default_schema_name
+ charset = self._connection_charset
+ rp = connection.execute(
+ "SHOW FULL TABLES FROM %s" %
+ self.identifier_preparer.quote_identifier(schema))
+ return [row[0]
+ for row in self._compat_fetchall(rp, charset=charset)
+ if row[1] in ('VIEW', 'SYSTEM VIEW')]
+
+ @reflection.cache
+ def get_table_options(self, connection, table_name, schema=None, **kw):
+
+ parsed_state = self._parsed_state_or_create(
+ connection, table_name, schema, **kw)
+ return parsed_state.table_options
+
+ @reflection.cache
+ def get_columns(self, connection, table_name, schema=None, **kw):
+ parsed_state = self._parsed_state_or_create(
+ connection, table_name, schema, **kw)
+ return parsed_state.columns
+
+ @reflection.cache
+ def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+ parsed_state = self._parsed_state_or_create(
+ connection, table_name, schema, **kw)
+ for key in parsed_state.keys:
+ if key['type'] == 'PRIMARY':
+ # There can be only one.
+ cols = [s[0] for s in key['columns']]
+ return {'constrained_columns': cols, 'name': None}
+ return {'constrained_columns': [], 'name': None}
+
+ @reflection.cache
+ def get_foreign_keys(self, connection, table_name, schema=None, **kw):
+
+ parsed_state = self._parsed_state_or_create(
+ connection, table_name, schema, **kw)
+ default_schema = None
+
+ fkeys = []
+
+ for spec in parsed_state.constraints:
+ # only FOREIGN KEYs
+ ref_name = spec['table'][-1]
+ ref_schema = len(spec['table']) > 1 and \
+ spec['table'][-2] or schema
+
+ if not ref_schema:
+ if default_schema is None:
+ default_schema = \
+ connection.dialect.default_schema_name
+ if schema == default_schema:
+ ref_schema = schema
+
+ loc_names = spec['local']
+ ref_names = spec['foreign']
+
+ con_kw = {}
+ for opt in ('onupdate', 'ondelete'):
+ if spec.get(opt, False):
+ con_kw[opt] = spec[opt]
+
+ fkey_d = {
+ 'name': spec['name'],
+ 'constrained_columns': loc_names,
+ 'referred_schema': ref_schema,
+ 'referred_table': ref_name,
+ 'referred_columns': ref_names,
+ 'options': con_kw
+ }
+ fkeys.append(fkey_d)
+ return fkeys
+
+ @reflection.cache
+ def get_indexes(self, connection, table_name, schema=None, **kw):
+
+ parsed_state = self._parsed_state_or_create(
+ connection, table_name, schema, **kw)
+
+ indexes = []
+ for spec in parsed_state.keys:
+ unique = False
+ flavor = spec['type']
+ if flavor == 'PRIMARY':
+ continue
+ if flavor == 'UNIQUE':
+ unique = True
+ elif flavor in (None, 'FULLTEXT', 'SPATIAL'):
+ pass
+ else:
+ self.logger.info(
+ "Converting unknown KEY type %s to a plain KEY", flavor)
+ pass
+ index_d = {}
+ index_d['name'] = spec['name']
+ index_d['column_names'] = [s[0] for s in spec['columns']]
+ index_d['unique'] = unique
+ if flavor:
+ index_d['type'] = flavor
+ indexes.append(index_d)
+ return indexes
+
+ @reflection.cache
+ def get_unique_constraints(self, connection, table_name,
+ schema=None, **kw):
+ parsed_state = self._parsed_state_or_create(
+ connection, table_name, schema, **kw)
+
+ return [
+ {
+ 'name': key['name'],
+ 'column_names': [col[0] for col in key['columns']],
+ 'duplicates_index': key['name'],
+ }
+ for key in parsed_state.keys
+ if key['type'] == 'UNIQUE'
+ ]
+
+ @reflection.cache
+ def get_view_definition(self, connection, view_name, schema=None, **kw):
+
+ charset = self._connection_charset
+ full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
+ schema, view_name))
+ sql = self._show_create_table(connection, None, charset,
+ full_name=full_name)
+ return sql
+
+ def _parsed_state_or_create(self, connection, table_name,
+ schema=None, **kw):
+ return self._setup_parser(
+ connection,
+ table_name,
+ schema,
+ info_cache=kw.get('info_cache', None)
+ )
+
+ @util.memoized_property
+ def _tabledef_parser(self):
+ """return the MySQLTableDefinitionParser, generate if needed.
+
+ The deferred creation ensures that the dialect has
+ retrieved server version information first.
+
+ """
+ if (self.server_version_info < (4, 1) and self._server_ansiquotes):
+ # ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
+ preparer = self.preparer(self, server_ansiquotes=False)
+ else:
+ preparer = self.identifier_preparer
+ return _reflection.MySQLTableDefinitionParser(self, preparer)
+
+ @reflection.cache
+ def _setup_parser(self, connection, table_name, schema=None, **kw):
+ charset = self._connection_charset
+ parser = self._tabledef_parser
+ full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
+ schema, table_name))
+ sql = self._show_create_table(connection, None, charset,
+ full_name=full_name)
+ if re.match(r'^CREATE (?:ALGORITHM)?.* VIEW', sql):
+ # Adapt views to something table-like.
+ columns = self._describe_table(connection, None, charset,
+ full_name=full_name)
+ sql = parser._describe_to_create(table_name, columns)
+ return parser.parse(sql, charset)
+
+ def _detect_charset(self, connection):
+ raise NotImplementedError()
+
+ def _detect_casing(self, connection):
+ """Sniff out identifier case sensitivity.
+
+ Cached per-connection. This value can not change without a server
+ restart.
+
+ """
+ # http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
+
+ charset = self._connection_charset
+ row = self._compat_first(connection.execute(
+ "SHOW VARIABLES LIKE 'lower_case_table_names'"),
+ charset=charset)
+ if not row:
+ cs = 0
+ else:
+ # 4.0.15 returns OFF or ON according to [ticket:489]
+ # 3.23 doesn't, 4.0.27 doesn't..
+ if row[1] == 'OFF':
+ cs = 0
+ elif row[1] == 'ON':
+ cs = 1
+ else:
+ cs = int(row[1])
+ return cs
+
+ def _detect_collations(self, connection):
+ """Pull the active COLLATIONS list from the server.
+
+ Cached per-connection.
+ """
+
+ collations = {}
+ if self.server_version_info < (4, 1, 0):
+ pass
+ else:
+ charset = self._connection_charset
+ rs = connection.execute('SHOW COLLATION')
+ for row in self._compat_fetchall(rs, charset):
+ collations[row[0]] = row[1]
+ return collations
+
+ def _detect_ansiquotes(self, connection):
+ """Detect and adjust for the ANSI_QUOTES sql mode."""
+
+ row = self._compat_first(
+ connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
+ charset=self._connection_charset)
+
+ if not row:
+ mode = ''
+ else:
+ mode = row[1] or ''
+ # 4.0
+ if mode.isdigit():
+ mode_no = int(mode)
+ mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
+
+ self._server_ansiquotes = 'ANSI_QUOTES' in mode
+
+ # as of MySQL 5.0.1
+ self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
+
+ def _show_create_table(self, connection, table, charset=None,
+ full_name=None):
+ """Run SHOW CREATE TABLE for a ``Table``."""
+
+ if full_name is None:
+ full_name = self.identifier_preparer.format_table(table)
+ st = "SHOW CREATE TABLE %s" % full_name
+
+ rp = None
+ try:
+ rp = connection.execution_options(
+ skip_user_error_events=True).execute(st)
+ except exc.DBAPIError as e:
+ if self._extract_error_code(e.orig) == 1146:
+ raise exc.NoSuchTableError(full_name)
+ else:
+ raise
+ row = self._compat_first(rp, charset=charset)
+ if not row:
+ raise exc.NoSuchTableError(full_name)
+ return row[1].strip()
+
+ def _describe_table(self, connection, table, charset=None,
+ full_name=None):
+ """Run DESCRIBE for a ``Table`` and return processed rows."""
+
+ if full_name is None:
+ full_name = self.identifier_preparer.format_table(table)
+ st = "DESCRIBE %s" % full_name
+
+ rp, rows = None, None
+ try:
+ try:
+ rp = connection.execution_options(
+ skip_user_error_events=True).execute(st)
+ except exc.DBAPIError as e:
+ if self._extract_error_code(e.orig) == 1146:
+ raise exc.NoSuchTableError(full_name)
+ else:
+ raise
+ rows = self._compat_fetchall(rp, charset=charset)
+ finally:
+ if rp:
+ rp.close()
+ return rows
+
+
+class _DecodingRowProxy(object):
+ """Return unicode-decoded values based on type inspection.
+
+ Smooth over data type issues (esp. with alpha driver versions) and
+ normalize strings as Unicode regardless of user-configured driver
+ encoding settings.
+
+ """
+
+ # Some MySQL-python versions can return some columns as
+ # sets.Set(['value']) (seriously) but thankfully that doesn't
+ # seem to come up in DDL queries.
+
+ _encoding_compat = {
+ 'koi8r': 'koi8_r',
+ 'koi8u': 'koi8_u',
+        'utf16': 'utf-16-be',  # MySQL's utf16 is always big-endian
+ 'utf8mb4': 'utf8', # real utf8
+ 'eucjpms': 'ujis',
+ }
+
+ def __init__(self, rowproxy, charset):
+ self.rowproxy = rowproxy
+ self.charset = self._encoding_compat.get(charset, charset)
+
+ def __getitem__(self, index):
+ item = self.rowproxy[index]
+ if isinstance(item, _array):
+ item = item.tostring()
+
+ if self.charset and isinstance(item, util.binary_type):
+ return item.decode(self.charset)
+ else:
+ return item
+
+ def __getattr__(self, attr):
+ item = getattr(self.rowproxy, attr)
+ if isinstance(item, _array):
+ item = item.tostring()
+ if self.charset and isinstance(item, util.binary_type):
+ return item.decode(self.charset)
+ else:
+ return item
+
diff --git a/app/lib/sqlalchemy/dialects/mysql/cymysql.py b/app/lib/sqlalchemy/dialects/mysql/cymysql.py
new file mode 100644
index 0000000..a5ddb1a
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/cymysql.py
@@ -0,0 +1,87 @@
+# mysql/cymysql.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+.. dialect:: mysql+cymysql
+ :name: CyMySQL
+ :dbapi: cymysql
+    :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>\
+[?<options>]
+ :url: https://github.com/nakagami/CyMySQL
+
+"""
+import re
+
+from .mysqldb import MySQLDialect_mysqldb
+from .base import (BIT, MySQLDialect)
+from ... import util
+
+
+class _cymysqlBIT(BIT):
+ def result_processor(self, dialect, coltype):
+        """Convert MySQL's 64-bit, variable-length binary string to a long."""
+
+ def process(value):
+ if value is not None:
+ v = 0
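+                # fold the bytes together big-endian into a single integer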
+ for i in util.iterbytes(value):
+ v = v << 8 | i
+ return v
+ return value
+ return process
+
+
+class MySQLDialect_cymysql(MySQLDialect_mysqldb):
+ driver = 'cymysql'
+
+ description_encoding = None
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = False
+ supports_unicode_statements = True
+
+ colspecs = util.update_copy(
+ MySQLDialect.colspecs,
+ {
+ BIT: _cymysqlBIT,
+ }
+ )
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('cymysql')
+
+ def _get_server_version_info(self, connection):
+ dbapi_con = connection.connection
+ version = []
+ r = re.compile(r'[.\-]')
+ for n in r.split(dbapi_con.server_version):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
+
+ def _detect_charset(self, connection):
+ return connection.connection.charset
+
+ def _extract_error_code(self, exception):
+ return exception.errno
+
+ def is_disconnect(self, e, connection, cursor):
+ if isinstance(e, self.dbapi.OperationalError):
+ return self._extract_error_code(e) in \
+ (2006, 2013, 2014, 2045, 2055)
+ elif isinstance(e, self.dbapi.InterfaceError):
+ # if underlying connection is closed,
+ # this is the error you get
+ return True
+ else:
+ return False
+
+dialect = MySQLDialect_cymysql
diff --git a/app/lib/sqlalchemy/dialects/mysql/enumerated.py b/app/lib/sqlalchemy/dialects/mysql/enumerated.py
new file mode 100644
index 0000000..495bee5
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/enumerated.py
@@ -0,0 +1,311 @@
+# mysql/enumerated.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import re
+
+from .types import _StringType
+from ... import exc, sql, util
+from ... import types as sqltypes
+
+
+class _EnumeratedValues(_StringType):
+ def _init_values(self, values, kw):
+ self.quoting = kw.pop('quoting', 'auto')
+
+ if self.quoting == 'auto' and len(values):
+ # What quoting character are we using?
+ q = None
+ for e in values:
+ if len(e) == 0:
+ self.quoting = 'unquoted'
+ break
+ elif q is None:
+ q = e[0]
+
+ if len(e) == 1 or e[0] != q or e[-1] != q:
+ self.quoting = 'unquoted'
+ break
+ else:
+ self.quoting = 'quoted'
+
+ if self.quoting == 'quoted':
+ util.warn_deprecated(
+ 'Manually quoting %s value literals is deprecated. Supply '
+ 'unquoted values and use the quoting= option in cases of '
+ 'ambiguity.' % self.__class__.__name__)
+
+ values = self._strip_values(values)
+
+ self._enumerated_values = values
+ length = max([len(v) for v in values] + [0])
+ return values, length
+
+ @classmethod
+ def _strip_values(cls, values):
+ strip_values = []
+ for a in values:
+ if a[0:1] == '"' or a[0:1] == "'":
+ # strip enclosing quotes and unquote interior
+ a = a[1:-1].replace(a[0] * 2, a[0])
+ strip_values.append(a)
+ return strip_values
+
+
+class ENUM(sqltypes.Enum, _EnumeratedValues):
+ """MySQL ENUM type."""
+
+ __visit_name__ = 'ENUM'
+
+ def __init__(self, *enums, **kw):
+ """Construct an ENUM.
+
+ E.g.::
+
+ Column('myenum', ENUM("foo", "bar", "baz"))
+
+ :param enums: The range of valid values for this ENUM. Values will be
+ quoted when generating the schema according to the quoting flag (see
+ below). This object may also be a PEP-435-compliant enumerated
+ type.
+
+          .. versionadded:: 1.1 added support for PEP-435-compliant enumerated
+ types.
+
+ :param strict: This flag has no effect.
+
+          .. versionchanged:: 1.1 The MySQL ENUM type as well as the base Enum
+ type now validates all Python data values.
+
+ :param charset: Optional, a column-level character set for this string
+          value.  Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value.  Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ :param quoting: Defaults to 'auto': automatically determine enum value
+ quoting. If all enum values are surrounded by the same quoting
+ character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
+
+ 'quoted': values in enums are already quoted, they will be used
+ directly when generating the schema - this usage is deprecated.
+
+ 'unquoted': values in enums are not quoted, they will be escaped and
+ surrounded by single quotes when generating the schema.
+
+ Previous versions of this type always required manually quoted
+ values to be supplied; future versions will always quote the string
+ literals for you. This is a transitional option.
+
+ """
+
+ kw.pop('strict', None)
+ validate_strings = kw.pop("validate_strings", False)
+ sqltypes.Enum.__init__(
+ self, validate_strings=validate_strings, *enums)
+ kw.pop('metadata', None)
+ kw.pop('schema', None)
+ kw.pop('name', None)
+ kw.pop('quote', None)
+ kw.pop('native_enum', None)
+ kw.pop('inherit_schema', None)
+ kw.pop('_create_events', None)
+ _StringType.__init__(self, length=self.length, **kw)
+
+ def _setup_for_values(self, values, objects, kw):
+ values, length = self._init_values(values, kw)
+ return sqltypes.Enum._setup_for_values(self, values, objects, kw)
+
+ def _object_value_for_elem(self, elem):
+ # mysql sends back a blank string for any value that
+ # was persisted that was not in the enums; that is, it does no
+ # validation on the incoming data, it "truncates" it to be
+ # the blank string. Return it straight.
+ if elem == "":
+ return elem
+ else:
+ return super(ENUM, self)._object_value_for_elem(elem)
+
+ def __repr__(self):
+ return util.generic_repr(
+ self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
+
+ def adapt(self, cls, **kw):
+ return sqltypes.Enum.adapt(self, cls, **kw)
+
+
+class SET(_EnumeratedValues):
+ """MySQL SET type."""
+
+ __visit_name__ = 'SET'
+
+ def __init__(self, *values, **kw):
+ """Construct a SET.
+
+ E.g.::
+
+ Column('myset', SET("foo", "bar", "baz"))
+
+
+ The list of potential values is required in the case that this
+ set will be used to generate DDL for a table, or if the
+ :paramref:`.SET.retrieve_as_bitwise` flag is set to True.
+
+ :param values: The range of valid values for this SET.
+
+ :param convert_unicode: Same flag as that of
+ :paramref:`.String.convert_unicode`.
+
+ :param collation: same as that of :paramref:`.String.collation`
+
+ :param charset: same as that of :paramref:`.VARCHAR.charset`.
+
+ :param ascii: same as that of :paramref:`.VARCHAR.ascii`.
+
+ :param unicode: same as that of :paramref:`.VARCHAR.unicode`.
+
+ :param binary: same as that of :paramref:`.VARCHAR.binary`.
+
+ :param quoting: Defaults to 'auto': automatically determine set value
+ quoting. If all values are surrounded by the same quoting
+ character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
+
+ 'quoted': values in enums are already quoted, they will be used
+ directly when generating the schema - this usage is deprecated.
+
+ 'unquoted': values in enums are not quoted, they will be escaped and
+ surrounded by single quotes when generating the schema.
+
+ Previous versions of this type always required manually quoted
+ values to be supplied; future versions will always quote the string
+ literals for you. This is a transitional option.
+
+ .. versionadded:: 0.9.0
+
+ :param retrieve_as_bitwise: if True, the data for the set type will be
+ persisted and selected using an integer value, where a set is coerced
+ into a bitwise mask for persistence. MySQL allows this mode which
+ has the advantage of being able to store values unambiguously,
+ such as the blank string ``''``. The datatype will appear
+ as the expression ``col + 0`` in a SELECT statement, so that the
+ value is coerced into an integer value in result sets.
+ This flag is required if one wishes
+ to persist a set that can store the blank string ``''`` as a value.
+
+ .. warning::
+
+ When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
+ essential that the list of set values is expressed in the
+ **exact same order** as exists on the MySQL database.
+
+ .. versionadded:: 1.0.0
+
+
+ """
+ self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
+ values, length = self._init_values(values, kw)
+ self.values = tuple(values)
+ if not self.retrieve_as_bitwise and '' in values:
+ raise exc.ArgumentError(
+ "Can't use the blank value '' in a SET without "
+ "setting retrieve_as_bitwise=True")
+ if self.retrieve_as_bitwise:
+ self._bitmap = dict(
+ (value, 2 ** idx)
+ for idx, value in enumerate(self.values)
+ )
+ self._bitmap.update(
+ (2 ** idx, value)
+ for idx, value in enumerate(self.values)
+ )
+ kw.setdefault('length', length)
+ super(SET, self).__init__(**kw)
+
+ def column_expression(self, colexpr):
+ if self.retrieve_as_bitwise:
+ return sql.type_coerce(
+ sql.type_coerce(colexpr, sqltypes.Integer) + 0,
+ self
+ )
+ else:
+ return colexpr
+
+ def result_processor(self, dialect, coltype):
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is not None:
+ value = int(value)
+
+ return set(
+ util.map_bits(self._bitmap.__getitem__, value)
+ )
+ else:
+ return None
+ else:
+ super_convert = super(SET, self).result_processor(dialect, coltype)
+
+ def process(value):
+ if isinstance(value, util.string_types):
+ # MySQLdb returns a string, let's parse
+ if super_convert:
+ value = super_convert(value)
+ return set(re.findall(r'[^,]+', value))
+ else:
+ # mysql-connector-python does a naive
+ # split(",") which throws in an empty string
+ if value is not None:
+ value.discard('')
+ return value
+ return process
+
+ def bind_processor(self, dialect):
+ super_convert = super(SET, self).bind_processor(dialect)
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is None:
+ return None
+ elif isinstance(value, util.int_types + util.string_types):
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ else:
+ int_value = 0
+ for v in value:
+ int_value |= self._bitmap[v]
+ return int_value
+ else:
+
+ def process(value):
+ # accept strings and int (actually bitflag) values directly
+ if value is not None and not isinstance(
+ value, util.int_types + util.string_types):
+ value = ",".join(value)
+
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ return process
+
+ def adapt(self, impltype, **kw):
+ kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
+ return util.constructor_copy(
+ self, impltype,
+ *self.values,
+ **kw
+ )
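The bitwise round trip that ``retrieve_as_bitwise=True`` sets up above is easiest to see with concrete values. A minimal runnable sketch of the same mapping, using a made-up value list rather than any real table::

    values = ('red', 'green', 'blue')
    bitmap = {v: 2 ** i for i, v in enumerate(values)}
    bitmap.update({2 ** i: v for i, v in enumerate(values)})

    def to_mask(member_set):
        # bind direction: a set of strings becomes an integer mask
        mask = 0
        for v in member_set:
            mask |= bitmap[v]
        return mask

    def from_mask(mask):
        # result direction: walk the set bits back to their strings,
        # mirroring util.map_bits(self._bitmap.__getitem__, value)
        return {bitmap[1 << i] for i in range(len(values)) if mask & (1 << i)}

    assert to_mask({'red', 'blue'}) == 5
    assert from_mask(5) == {'red', 'blue'}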
diff --git a/app/lib/sqlalchemy/dialects/mysql/gaerdbms.py b/app/lib/sqlalchemy/dialects/mysql/gaerdbms.py
new file mode 100644
index 0000000..1c64823
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/gaerdbms.py
@@ -0,0 +1,102 @@
+# mysql/gaerdbms.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+.. dialect:: mysql+gaerdbms
+ :name: Google Cloud SQL
+ :dbapi: rdbms
+ :connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
+ :url: https://developers.google.com/appengine/docs/python/cloud-sql/\
+developers-guide
+
+ This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
+ minimal changes.
+
+ .. versionadded:: 0.7.8
+
+ .. deprecated:: 1.0 This dialect is **no longer necessary** for
+ Google Cloud SQL; the MySQLdb dialect can be used directly.
+ Cloud SQL now recommends creating connections via the
+ mysql dialect using the URL format
+
+ ``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
+
+
+Pooling
+-------
+
+Google App Engine connections appear to be randomly recycled,
+so the dialect does not pool connections. The :class:`.NullPool`
+implementation is installed within the :class:`.Engine` by
+default.
+
+"""
+
+import os
+
+from .mysqldb import MySQLDialect_mysqldb
+from ...pool import NullPool
+import re
+from sqlalchemy.util import warn_deprecated
+
+
+def _is_dev_environment():
+ return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
+
+
+class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
+
+ @classmethod
+ def dbapi(cls):
+
+ warn_deprecated(
+ "Google Cloud SQL now recommends creating connections via the "
+ "MySQLdb dialect directly, using the URL format "
+ "mysql+mysqldb://root@/?unix_socket=/cloudsql/"
+ ":"
+ )
+
+ # from django:
+ # http://code.google.com/p/googleappengine/source/
+ # browse/trunk/python/google/storage/speckle/
+ # python/django/backend/base.py#118
+ # see also [ticket:2649]
+ # see also http://stackoverflow.com/q/14224679/34549
+ from google.appengine.api import apiproxy_stub_map
+
+ if _is_dev_environment():
+ from google.appengine.api import rdbms_mysqldb
+ return rdbms_mysqldb
+ elif apiproxy_stub_map.apiproxy.GetStub('rdbms'):
+ from google.storage.speckle.python.api import rdbms_apiproxy
+ return rdbms_apiproxy
+ else:
+ from google.storage.speckle.python.api import rdbms_googleapi
+ return rdbms_googleapi
+
+ @classmethod
+ def get_pool_class(cls, url):
+ # Cloud SQL connections die at any moment
+ return NullPool
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args()
+ if not _is_dev_environment():
+ # 'dsn' and 'instance' are because we are skipping
+ # the traditional google.api.rdbms wrapper
+ opts['dsn'] = ''
+ opts['instance'] = url.query['instance']
+ return [], opts
+
+ def _extract_error_code(self, exception):
+ match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
+ # The rdbms api will wrap then re-raise some types of errors
+ # making this regex return no matches.
+ code = match.group(1) or match.group(2) if match else None
+ if code:
+ return int(code)
+
+dialect = MySQLDialect_gaerdbms
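The two alternatives in the ``_extract_error_code`` regex above cover the two shapes in which the rdbms API stringifies errors; a standalone sketch against a made-up error string::

    import re

    match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match("(1062, 'Duplicate entry')")
    code = (match.group(1) or match.group(2)) if match else None
    assert int(code) == 1062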
diff --git a/app/lib/sqlalchemy/dialects/mysql/json.py b/app/lib/sqlalchemy/dialects/mysql/json.py
new file mode 100644
index 0000000..d7b8666
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/json.py
@@ -0,0 +1,79 @@
+# mysql/json.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from __future__ import absolute_import
+
+import json
+
+from ...sql import elements
+from ... import types as sqltypes
+from ... import util
+
+
+class JSON(sqltypes.JSON):
+ """MySQL JSON type.
+
+ MySQL supports JSON as of version 5.7. Note that MariaDB does **not**
+ support JSON at the time of this writing.
+
+ The :class:`.mysql.JSON` type supports persistence of JSON values
+ as well as the core index operations provided by :class:`.types.JSON`
+ datatype, by adapting the operations to render the ``JSON_EXTRACT``
+ function at the database level.
+
+ .. versionadded:: 1.1
+
+ """
+
+ pass
+
+
+class _FormatTypeMixin(object):
+ def _format_value(self, value):
+ raise NotImplementedError()
+
+ def bind_processor(self, dialect):
+ super_proc = self.string_bind_processor(dialect)
+
+ def process(value):
+ value = self._format_value(value)
+ if super_proc:
+ value = super_proc(value)
+ return value
+
+ return process
+
+ def literal_processor(self, dialect):
+ super_proc = self.string_literal_processor(dialect)
+
+ def process(value):
+ value = self._format_value(value)
+ if super_proc:
+ value = super_proc(value)
+ return value
+
+ return process
+
+
+class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
+
+ def _format_value(self, value):
+ if isinstance(value, int):
+ value = "$[%s]" % value
+ else:
+ value = '$."%s"' % value
+ return value
+
+
+class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
+ def _format_value(self, value):
+ return "$%s" % (
+ "".join([
+ "[%s]" % elem if isinstance(elem, int)
+ else '."%s"' % elem for elem in value
+ ])
+ )
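The ``_format_value`` hooks above translate Python-side path elements into MySQL JSON path strings for ``JSON_EXTRACT``; a standalone sketch of the ``JSONPathType`` case, with an illustrative path::

    def format_json_path(elements):
        # ints become array indexes, strings become quoted member names
        return "$%s" % "".join(
            "[%s]" % e if isinstance(e, int) else '."%s"' % e
            for e in elements
        )

    assert format_json_path(["data", 0, "name"]) == '$."data"[0]."name"'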
diff --git a/app/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/app/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
new file mode 100644
index 0000000..ac77ebc
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
@@ -0,0 +1,203 @@
+# mysql/mysqlconnector.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: mysql+mysqlconnector
+ :name: MySQL Connector/Python
+ :dbapi: myconnpy
+ :connectstring: mysql+mysqlconnector://<user>:<password>@\
+<host>[:<port>]/<dbname>
+ :url: http://dev.mysql.com/downloads/connector/python/
+
+
+Unicode
+-------
+
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
+
+"""
+
+from .base import (MySQLDialect, MySQLExecutionContext,
+ MySQLCompiler, MySQLIdentifierPreparer,
+ BIT)
+
+from ... import util
+import re
+
+
+class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
+
+ def get_lastrowid(self):
+ return self.cursor.lastrowid
+
+
+class MySQLCompiler_mysqlconnector(MySQLCompiler):
+ def visit_mod_binary(self, binary, operator, **kw):
+ if self.dialect._mysqlconnector_double_percents:
+ return self.process(binary.left, **kw) + " %% " + \
+ self.process(binary.right, **kw)
+ else:
+ return self.process(binary.left, **kw) + " % " + \
+ self.process(binary.right, **kw)
+
+ def post_process_text(self, text):
+ if self.dialect._mysqlconnector_double_percents:
+ return text.replace('%', '%%')
+ else:
+ return text
+
+ def escape_literal_column(self, text):
+ if self.dialect._mysqlconnector_double_percents:
+ return text.replace('%', '%%')
+ else:
+ return text
+
+
+class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer):
+
+ def _escape_identifier(self, value):
+ value = value.replace(self.escape_quote, self.escape_to_quote)
+ if self.dialect._mysqlconnector_double_percents:
+ return value.replace("%", "%%")
+ else:
+ return value
+
+
+class _myconnpyBIT(BIT):
+ def result_processor(self, dialect, coltype):
+ """MySQL-connector already converts mysql bits, so."""
+
+ return None
+
+
+class MySQLDialect_mysqlconnector(MySQLDialect):
+ driver = 'mysqlconnector'
+
+ supports_unicode_binds = True
+
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = True
+
+ supports_native_decimal = True
+
+ default_paramstyle = 'format'
+ execution_ctx_cls = MySQLExecutionContext_mysqlconnector
+ statement_compiler = MySQLCompiler_mysqlconnector
+
+ preparer = MySQLIdentifierPreparer_mysqlconnector
+
+ colspecs = util.update_copy(
+ MySQLDialect.colspecs,
+ {
+ BIT: _myconnpyBIT,
+ }
+ )
+
+ @util.memoized_property
+ def supports_unicode_statements(self):
+ return util.py3k or self._mysqlconnector_version_info > (2, 0)
+
+ @classmethod
+ def dbapi(cls):
+ from mysql import connector
+ return connector
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+
+ opts.update(url.query)
+
+ util.coerce_kw_type(opts, 'allow_local_infile', bool)
+ util.coerce_kw_type(opts, 'autocommit', bool)
+ util.coerce_kw_type(opts, 'buffered', bool)
+ util.coerce_kw_type(opts, 'compress', bool)
+ util.coerce_kw_type(opts, 'connection_timeout', int)
+ util.coerce_kw_type(opts, 'connect_timeout', int)
+ util.coerce_kw_type(opts, 'consume_results', bool)
+ util.coerce_kw_type(opts, 'force_ipv6', bool)
+ util.coerce_kw_type(opts, 'get_warnings', bool)
+ util.coerce_kw_type(opts, 'pool_reset_session', bool)
+ util.coerce_kw_type(opts, 'pool_size', int)
+ util.coerce_kw_type(opts, 'raise_on_warnings', bool)
+ util.coerce_kw_type(opts, 'raw', bool)
+ util.coerce_kw_type(opts, 'ssl_verify_cert', bool)
+ util.coerce_kw_type(opts, 'use_pure', bool)
+ util.coerce_kw_type(opts, 'use_unicode', bool)
+
+ # unfortunately, MySQL/connector python refuses to release a
+ # cursor without reading fully, so non-buffered isn't an option
+ opts.setdefault('buffered', True)
+
+ # FOUND_ROWS must be set in ClientFlag to enable
+ # supports_sane_rowcount.
+ if self.dbapi is not None:
+ try:
+ from mysql.connector.constants import ClientFlag
+ client_flags = opts.get(
+ 'client_flags', ClientFlag.get_default())
+ client_flags |= ClientFlag.FOUND_ROWS
+ opts['client_flags'] = client_flags
+ except Exception:
+ pass
+ return [[], opts]
+
+ @util.memoized_property
+ def _mysqlconnector_version_info(self):
+ if self.dbapi and hasattr(self.dbapi, '__version__'):
+ m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
+ self.dbapi.__version__)
+ if m:
+ return tuple(
+ int(x)
+ for x in m.group(1, 2, 3)
+ if x is not None)
+
+ @util.memoized_property
+ def _mysqlconnector_double_percents(self):
+ return not util.py3k and self._mysqlconnector_version_info < (2, 0)
+
+ def _get_server_version_info(self, connection):
+ dbapi_con = connection.connection
+ version = dbapi_con.get_server_version()
+ return tuple(version)
+
+ def _detect_charset(self, connection):
+ return connection.connection.charset
+
+ def _extract_error_code(self, exception):
+ return exception.errno
+
+ def is_disconnect(self, e, connection, cursor):
+ errnos = (2006, 2013, 2014, 2045, 2055, 2048)
+ exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
+ if isinstance(e, exceptions):
+ return e.errno in errnos or \
+ "MySQL Connection not available." in str(e)
+ else:
+ return False
+
+ def _compat_fetchall(self, rp, charset=None):
+ return rp.fetchall()
+
+ def _compat_fetchone(self, rp, charset=None):
+ return rp.fetchone()
+
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ',
+ 'AUTOCOMMIT'])
+
+ def _set_isolation_level(self, connection, level):
+ if level == 'AUTOCOMMIT':
+ connection.autocommit = True
+ else:
+ connection.autocommit = False
+ super(MySQLDialect_mysqlconnector, self)._set_isolation_level(
+ connection, level)
+
+
+dialect = MySQLDialect_mysqlconnector
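The ``_mysqlconnector_version_info`` property above reduces the driver's version string to a comparable tuple, which is what gates ``_mysqlconnector_double_percents``; a sketch assuming a version string of ``'2.1.7'``::

    import re

    m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', '2.1.7')
    version = tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
    assert version == (2, 1, 7) and version > (2, 0)  # no percent doubling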
diff --git a/app/lib/sqlalchemy/dialects/mysql/mysqldb.py b/app/lib/sqlalchemy/dialects/mysql/mysqldb.py
new file mode 100644
index 0000000..6af8601
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/mysqldb.py
@@ -0,0 +1,228 @@
+# mysql/mysqldb.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+.. dialect:: mysql+mysqldb
+ :name: MySQL-Python
+ :dbapi: mysqldb
+ :connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
+ :url: http://sourceforge.net/projects/mysql-python
+
+.. _mysqldb_unicode:
+
+Unicode
+-------
+
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
+
+Py3K Support
+------------
+
+Currently, MySQLdb only runs on Python 2 and development has been stopped.
+`mysqlclient`_ is a fork of MySQLdb and provides Python 3 support as well
+as some bugfixes.
+
+.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
+
+Using MySQLdb with Google Cloud SQL
+-----------------------------------
+
+Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
+using a URL like the following::
+
+ mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
+
+Server Side Cursors
+-------------------
+
+The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`.
+
+"""
+
+from .base import (MySQLDialect, MySQLExecutionContext,
+ MySQLCompiler, MySQLIdentifierPreparer)
+from .base import TEXT
+from ... import sql
+from ... import util
+import re
+
+
+class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
+
+ @property
+ def rowcount(self):
+ if hasattr(self, '_rowcount'):
+ return self._rowcount
+ else:
+ return self.cursor.rowcount
+
+
+class MySQLCompiler_mysqldb(MySQLCompiler):
+ def visit_mod_binary(self, binary, operator, **kw):
+ return self.process(binary.left, **kw) + " %% " + \
+ self.process(binary.right, **kw)
+
+ def post_process_text(self, text):
+ return text.replace('%', '%%')
+
+
+class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):
+
+ def _escape_identifier(self, value):
+ value = value.replace(self.escape_quote, self.escape_to_quote)
+ return value.replace("%", "%%")
+
+
+class MySQLDialect_mysqldb(MySQLDialect):
+ driver = 'mysqldb'
+ supports_unicode_statements = True
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = True
+
+ supports_native_decimal = True
+
+ default_paramstyle = 'format'
+ execution_ctx_cls = MySQLExecutionContext_mysqldb
+ statement_compiler = MySQLCompiler_mysqldb
+ preparer = MySQLIdentifierPreparer_mysqldb
+
+ def __init__(self, server_side_cursors=False, **kwargs):
+ super(MySQLDialect_mysqldb, self).__init__(**kwargs)
+ self.server_side_cursors = server_side_cursors
+
+ @util.langhelpers.memoized_property
+ def supports_server_side_cursors(self):
+ try:
+ cursors = __import__('MySQLdb.cursors').cursors
+ self._sscursor = cursors.SSCursor
+ return True
+ except (ImportError, AttributeError):
+ return False
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('MySQLdb')
+
+ def do_executemany(self, cursor, statement, parameters, context=None):
+ rowcount = cursor.executemany(statement, parameters)
+ if context is not None:
+ context._rowcount = rowcount
+
+ def _check_unicode_returns(self, connection):
+ # work around issue fixed in
+ # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
+ # specific issue w/ the utf8_bin collation and unicode returns
+
+ has_utf8_bin = self.server_version_info > (5, ) and \
+ connection.scalar(
+ "show collation where %s = 'utf8' and %s = 'utf8_bin'"
+ % (
+ self.identifier_preparer.quote("Charset"),
+ self.identifier_preparer.quote("Collation")
+ ))
+ if has_utf8_bin:
+ additional_tests = [
+ sql.collate(sql.cast(
+ sql.literal_column(
+ "'test collated returns'"),
+ TEXT(charset='utf8')), "utf8_bin")
+ ]
+ else:
+ additional_tests = []
+ return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
+ connection, additional_tests)
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(database='db', username='user',
+ password='passwd')
+ opts.update(url.query)
+
+ util.coerce_kw_type(opts, 'compress', bool)
+ util.coerce_kw_type(opts, 'connect_timeout', int)
+ util.coerce_kw_type(opts, 'read_timeout', int)
+ util.coerce_kw_type(opts, 'client_flag', int)
+ util.coerce_kw_type(opts, 'local_infile', int)
+ # Note: using either of the below will cause all strings to be
+ # returned as Unicode, both in raw SQL operations and with column
+ # types like String and MSString.
+ util.coerce_kw_type(opts, 'use_unicode', bool)
+ util.coerce_kw_type(opts, 'charset', str)
+
+ # Rich values 'cursorclass' and 'conv' are not supported via
+ # query string.
+
+ ssl = {}
+ keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']
+ for key in keys:
+ if key in opts:
+ ssl[key[4:]] = opts[key]
+ util.coerce_kw_type(ssl, key[4:], str)
+ del opts[key]
+ if ssl:
+ opts['ssl'] = ssl
+
+ # FOUND_ROWS must be set in CLIENT_FLAGS to enable
+ # supports_sane_rowcount.
+ client_flag = opts.get('client_flag', 0)
+ if self.dbapi is not None:
+ try:
+ CLIENT_FLAGS = __import__(
+ self.dbapi.__name__ + '.constants.CLIENT'
+ ).constants.CLIENT
+ client_flag |= CLIENT_FLAGS.FOUND_ROWS
+ except (AttributeError, ImportError):
+ self.supports_sane_rowcount = False
+ opts['client_flag'] = client_flag
+ return [[], opts]
+
+ def _get_server_version_info(self, connection):
+ dbapi_con = connection.connection
+ version = []
+ r = re.compile(r'[.\-]')
+ for n in r.split(dbapi_con.get_server_info()):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
+
+ def _extract_error_code(self, exception):
+ return exception.args[0]
+
+ def _detect_charset(self, connection):
+ """Sniff out the character set in use for connection results."""
+
+ try:
+ # note: the SQL here would be
+ # "SHOW VARIABLES LIKE 'character_set%%'"
+ cset_name = connection.connection.character_set_name
+ except AttributeError:
+ util.warn(
+ "No 'character_set_name' can be detected with "
+ "this MySQL-Python version; "
+ "please upgrade to a recent version of MySQL-Python. "
+ "Assuming latin1.")
+ return 'latin1'
+ else:
+ return cset_name()
+
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ',
+ 'AUTOCOMMIT'])
+
+ def _set_isolation_level(self, connection, level):
+ if level == 'AUTOCOMMIT':
+ connection.autocommit(True)
+ else:
+ connection.autocommit(False)
+ super(MySQLDialect_mysqldb, self)._set_isolation_level(connection,
+ level)
+
+
+dialect = MySQLDialect_mysqldb
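``create_connect_args`` above folds flat ``ssl_*`` query-string entries into the nested ``ssl`` dict that MySQLdb expects; a standalone sketch with illustrative file paths::

    opts = {'user': 'scott', 'ssl_ca': '/certs/ca.pem', 'ssl_key': '/certs/key.pem'}
    ssl = {}
    for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']:
        if key in opts:
            ssl[key[4:]] = opts.pop(key)  # 'ssl_ca' -> 'ca'
    if ssl:
        opts['ssl'] = ssl

    assert opts == {'user': 'scott',
                    'ssl': {'ca': '/certs/ca.pem', 'key': '/certs/key.pem'}}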
diff --git a/app/lib/sqlalchemy/dialects/mysql/oursql.py b/app/lib/sqlalchemy/dialects/mysql/oursql.py
new file mode 100644
index 0000000..f7f90e9
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/oursql.py
@@ -0,0 +1,254 @@
+# mysql/oursql.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+.. dialect:: mysql+oursql
+ :name: OurSQL
+ :dbapi: oursql
+ :connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
+ :url: http://packages.python.org/oursql/
+
+Unicode
+-------
+
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
+
+
+"""
+
+import re
+
+from .base import (BIT, MySQLDialect, MySQLExecutionContext)
+from ... import types as sqltypes, util
+
+
+class _oursqlBIT(BIT):
+ def result_processor(self, dialect, coltype):
+ """oursql already converts mysql bits, so."""
+
+ return None
+
+
+class MySQLExecutionContext_oursql(MySQLExecutionContext):
+
+ @property
+ def plain_query(self):
+ return self.execution_options.get('_oursql_plain_query', False)
+
+
+class MySQLDialect_oursql(MySQLDialect):
+ driver = 'oursql'
+
+ if util.py2k:
+ supports_unicode_binds = True
+ supports_unicode_statements = True
+
+ supports_native_decimal = True
+
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = True
+ execution_ctx_cls = MySQLExecutionContext_oursql
+
+ colspecs = util.update_copy(
+ MySQLDialect.colspecs,
+ {
+ sqltypes.Time: sqltypes.Time,
+ BIT: _oursqlBIT,
+ }
+ )
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('oursql')
+
+ def do_execute(self, cursor, statement, parameters, context=None):
+ """Provide an implementation of
+ *cursor.execute(statement, parameters)*."""
+
+ if context and context.plain_query:
+ cursor.execute(statement, plain_query=True)
+ else:
+ cursor.execute(statement, parameters)
+
+ def do_begin(self, connection):
+ connection.cursor().execute('BEGIN', plain_query=True)
+
+ def _xa_query(self, connection, query, xid):
+ if util.py2k:
+ arg = connection.connection._escape_string(xid)
+ else:
+ charset = self._connection_charset
+ arg = connection.connection._escape_string(
+ xid.encode(charset)).decode(charset)
+ arg = "'%s'" % arg
+ connection.execution_options(
+ _oursql_plain_query=True).execute(query % arg)
+
+ # Because mysql is bad, these methods have to be
+ # reimplemented to use _PlainQuery. Basically, some queries
+ # refuse to return any data if they're run through
+ # the parameterized query API, or refuse to be parameterized
+ # in the first place.
+ def do_begin_twophase(self, connection, xid):
+ self._xa_query(connection, 'XA BEGIN %s', xid)
+
+ def do_prepare_twophase(self, connection, xid):
+ self._xa_query(connection, 'XA END %s', xid)
+ self._xa_query(connection, 'XA PREPARE %s', xid)
+
+ def do_rollback_twophase(self, connection, xid, is_prepared=True,
+ recover=False):
+ if not is_prepared:
+ self._xa_query(connection, 'XA END %s', xid)
+ self._xa_query(connection, 'XA ROLLBACK %s', xid)
+
+ def do_commit_twophase(self, connection, xid, is_prepared=True,
+ recover=False):
+ if not is_prepared:
+ self.do_prepare_twophase(connection, xid)
+ self._xa_query(connection, 'XA COMMIT %s', xid)
+
+ # Q: why didn't we need all these "plain_query" overrides earlier ?
+ # am i on a newer/older version of OurSQL ?
+ def has_table(self, connection, table_name, schema=None):
+ return MySQLDialect.has_table(
+ self,
+ connection.connect().execution_options(_oursql_plain_query=True),
+ table_name,
+ schema
+ )
+
+ def get_table_options(self, connection, table_name, schema=None, **kw):
+ return MySQLDialect.get_table_options(
+ self,
+ connection.connect().execution_options(_oursql_plain_query=True),
+ table_name,
+ schema=schema,
+ **kw
+ )
+
+ def get_columns(self, connection, table_name, schema=None, **kw):
+ return MySQLDialect.get_columns(
+ self,
+ connection.connect().execution_options(_oursql_plain_query=True),
+ table_name,
+ schema=schema,
+ **kw
+ )
+
+ def get_view_names(self, connection, schema=None, **kw):
+ return MySQLDialect.get_view_names(
+ self,
+ connection.connect().execution_options(_oursql_plain_query=True),
+ schema=schema,
+ **kw
+ )
+
+ def get_table_names(self, connection, schema=None, **kw):
+ return MySQLDialect.get_table_names(
+ self,
+ connection.connect().execution_options(_oursql_plain_query=True),
+ schema
+ )
+
+ def get_schema_names(self, connection, **kw):
+ return MySQLDialect.get_schema_names(
+ self,
+ connection.connect().execution_options(_oursql_plain_query=True),
+ **kw
+ )
+
+ def initialize(self, connection):
+ return MySQLDialect.initialize(
+ self,
+ connection.execution_options(_oursql_plain_query=True)
+ )
+
+ def _show_create_table(self, connection, table, charset=None,
+ full_name=None):
+ return MySQLDialect._show_create_table(
+ self,
+ connection.contextual_connect(close_with_result=True).
+ execution_options(_oursql_plain_query=True),
+ table, charset, full_name
+ )
+
+ def is_disconnect(self, e, connection, cursor):
+ if isinstance(e, self.dbapi.ProgrammingError):
+ return e.errno is None and 'cursor' not in e.args[1] \
+ and e.args[1].endswith('closed')
+ else:
+ return e.errno in (2006, 2013, 2014, 2045, 2055)
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(database='db', username='user',
+ password='passwd')
+ opts.update(url.query)
+
+ util.coerce_kw_type(opts, 'port', int)
+ util.coerce_kw_type(opts, 'compress', bool)
+ util.coerce_kw_type(opts, 'autoping', bool)
+ util.coerce_kw_type(opts, 'raise_on_warnings', bool)
+
+ util.coerce_kw_type(opts, 'default_charset', bool)
+ if opts.pop('default_charset', False):
+ opts['charset'] = None
+ else:
+ util.coerce_kw_type(opts, 'charset', str)
+ opts['use_unicode'] = opts.get('use_unicode', True)
+ util.coerce_kw_type(opts, 'use_unicode', bool)
+
+ # FOUND_ROWS must be set in CLIENT_FLAGS to enable
+ # supports_sane_rowcount.
+ opts.setdefault('found_rows', True)
+
+ ssl = {}
+ for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
+ 'ssl_capath', 'ssl_cipher']:
+ if key in opts:
+ ssl[key[4:]] = opts[key]
+ util.coerce_kw_type(ssl, key[4:], str)
+ del opts[key]
+ if ssl:
+ opts['ssl'] = ssl
+
+ return [[], opts]
+
+ def _get_server_version_info(self, connection):
+ dbapi_con = connection.connection
+ version = []
+ r = re.compile(r'[.\-]')
+ for n in r.split(dbapi_con.server_info):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
+
+ def _extract_error_code(self, exception):
+ return exception.errno
+
+ def _detect_charset(self, connection):
+ """Sniff out the character set in use for connection results."""
+
+ return connection.connection.charset
+
+ def _compat_fetchall(self, rp, charset=None):
+ """oursql isn't super-broken like MySQLdb, yaaay."""
+ return rp.fetchall()
+
+ def _compat_fetchone(self, rp, charset=None):
+ """oursql isn't super-broken like MySQLdb, yaaay."""
+ return rp.fetchone()
+
+ def _compat_first(self, rp, charset=None):
+ return rp.first()
+
+
+dialect = MySQLDialect_oursql
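The ``_get_server_version_info`` implementations here and in mysqldb.py both split the server info string on dots and dashes, keeping non-numeric trailers as strings; a sketch assuming a typical ``'5.7.21-log'`` banner::

    import re

    version = []
    for n in re.split(r'[.\-]', '5.7.21-log'):
        try:
            version.append(int(n))
        except ValueError:
            version.append(n)
    assert tuple(version) == (5, 7, 21, 'log')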
diff --git a/app/lib/sqlalchemy/dialects/mysql/pymysql.py b/app/lib/sqlalchemy/dialects/mysql/pymysql.py
new file mode 100644
index 0000000..b787bc2
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/pymysql.py
@@ -0,0 +1,70 @@
+# mysql/pymysql.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+.. dialect:: mysql+pymysql
+ :name: PyMySQL
+ :dbapi: pymysql
+ :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
+[?<options>]
+ :url: http://www.pymysql.org/
+
+Unicode
+-------
+
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
+
+MySQL-Python Compatibility
+--------------------------
+
+The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
+and targets 100% compatibility. Most behavioral notes for MySQL-python apply
+to the pymysql driver as well.
+
+"""
+
+from .mysqldb import MySQLDialect_mysqldb
+from ...util import langhelpers, py3k
+
+
+class MySQLDialect_pymysql(MySQLDialect_mysqldb):
+ driver = 'pymysql'
+
+ description_encoding = None
+
+ # generally, these two values should be both True
+ # or both False. PyMySQL unicode tests pass all the way back
+ # to 0.4 either way. See [ticket:3337]
+ supports_unicode_statements = True
+ supports_unicode_binds = True
+
+ def __init__(self, server_side_cursors=False, **kwargs):
+ super(MySQLDialect_pymysql, self).__init__(**kwargs)
+ self.server_side_cursors = server_side_cursors
+
+ @langhelpers.memoized_property
+ def supports_server_side_cursors(self):
+ try:
+ cursors = __import__('pymysql.cursors').cursors
+ self._sscursor = cursors.SSCursor
+ return True
+ except (ImportError, AttributeError):
+ return False
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('pymysql')
+
+ if py3k:
+ def _extract_error_code(self, exception):
+ if isinstance(exception.args[0], Exception):
+ exception = exception.args[0]
+ return exception.args[0]
+
+dialect = MySQLDialect_pymysql
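The Py3K-only ``_extract_error_code`` above unwraps one level of exception nesting before reading the numeric code, since PyMySQL may re-raise the original error as ``args[0]``; a driver-free sketch::

    inner = Exception(2006, 'MySQL server has gone away')
    outer = Exception(inner)

    exception = outer
    if isinstance(exception.args[0], Exception):
        exception = exception.args[0]
    assert exception.args[0] == 2006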
diff --git a/app/lib/sqlalchemy/dialects/mysql/pyodbc.py b/app/lib/sqlalchemy/dialects/mysql/pyodbc.py
new file mode 100644
index 0000000..2ec6edf
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/pyodbc.py
@@ -0,0 +1,79 @@
+# mysql/pyodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+
+.. dialect:: mysql+pyodbc
+ :name: PyODBC
+ :dbapi: pyodbc
+ :connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
+ :url: http://pypi.python.org/pypi/pyodbc/
+
+ .. note:: The PyODBC for MySQL dialect is not well supported, and
+ is subject to unresolved character encoding issues
+ which exist within the current ODBC drivers available.
+ (see http://code.google.com/p/pyodbc/issues/detail?id=25).
+ Other dialects for MySQL are recommended.
+
+"""
+
+from .base import MySQLDialect, MySQLExecutionContext
+from ...connectors.pyodbc import PyODBCConnector
+from ... import util
+import re
+
+
+class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
+
+ def get_lastrowid(self):
+ cursor = self.create_cursor()
+ cursor.execute("SELECT LAST_INSERT_ID()")
+ lastrowid = cursor.fetchone()[0]
+ cursor.close()
+ return lastrowid
+
+
+class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
+ supports_unicode_statements = False
+ execution_ctx_cls = MySQLExecutionContext_pyodbc
+
+ pyodbc_driver_name = "MySQL"
+
+ def __init__(self, **kw):
+ # deal with http://code.google.com/p/pyodbc/issues/detail?id=25
+ kw.setdefault('convert_unicode', True)
+ super(MySQLDialect_pyodbc, self).__init__(**kw)
+
+ def _detect_charset(self, connection):
+ """Sniff out the character set in use for connection results."""
+
+ # Prefer 'character_set_results' for the current connection over the
+ # value in the driver. SET NAMES or individual variable SETs will
+ # change the charset without updating the driver's view of the world.
+ #
+ # If it's decided that issuing that sort of SQL leaves you SOL, then
+ # this can prefer the driver value.
+ rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
+ opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
+ for key in ('character_set_connection', 'character_set'):
+ if opts.get(key, None):
+ return opts[key]
+
+ util.warn("Could not detect the connection character set. "
+ "Assuming latin1.")
+ return 'latin1'
+
+ def _extract_error_code(self, exception):
+ m = re.compile(r"\((\d+)\)").search(str(exception.args))
+ c = m.group(1)
+ if c:
+ return int(c)
+ else:
+ return None
+
+dialect = MySQLDialect_pyodbc
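pyodbc reports driver errors as plain strings, so ``_extract_error_code`` above fishes the parenthesized numeric code out of ``str(exception.args)``; a sketch against a made-up ODBC message::

    import re

    args = ('HY000', '(2006) [MySQL][ODBC] MySQL server has gone away')
    m = re.compile(r"\((\d+)\)").search(str(args))
    assert m is not None and int(m.group(1)) == 2006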
diff --git a/app/lib/sqlalchemy/dialects/mysql/reflection.py b/app/lib/sqlalchemy/dialects/mysql/reflection.py
new file mode 100644
index 0000000..f5f09b8
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/reflection.py
@@ -0,0 +1,450 @@
+# mysql/reflection.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import re
+from ... import log, util
+from ... import types as sqltypes
+from .enumerated import _EnumeratedValues, SET
+from .types import DATETIME, TIME, TIMESTAMP
+
+
+class ReflectedState(object):
+ """Stores raw information about a SHOW CREATE TABLE statement."""
+
+ def __init__(self):
+ self.columns = []
+ self.table_options = {}
+ self.table_name = None
+ self.keys = []
+ self.constraints = []
+
+
+@log.class_logger
+class MySQLTableDefinitionParser(object):
+ """Parses the results of a SHOW CREATE TABLE statement."""
+
+ def __init__(self, dialect, preparer):
+ self.dialect = dialect
+ self.preparer = preparer
+ self._prep_regexes()
+
+ def parse(self, show_create, charset):
+ state = ReflectedState()
+ state.charset = charset
+ for line in re.split(r'\r?\n', show_create):
+ if line.startswith(' ' + self.preparer.initial_quote):
+ self._parse_column(line, state)
+ # a regular table options line
+ elif line.startswith(') '):
+ self._parse_table_options(line, state)
+ # an ANSI-mode table options line
+ elif line == ')':
+ pass
+ elif line.startswith('CREATE '):
+ self._parse_table_name(line, state)
+ # Not present in real reflection, but may be if
+ # loading from a file.
+ elif not line:
+ pass
+ else:
+ type_, spec = self._parse_constraints(line)
+ if type_ is None:
+ util.warn("Unknown schema content: %r" % line)
+ elif type_ == 'key':
+ state.keys.append(spec)
+ elif type_ == 'constraint':
+ state.constraints.append(spec)
+ else:
+ pass
+ return state
+
+ def _parse_constraints(self, line):
+ """Parse a KEY or CONSTRAINT line.
+
+ :param line: A line of SHOW CREATE TABLE output
+ """
+
+ # KEY
+ m = self._re_key.match(line)
+ if m:
+ spec = m.groupdict()
+ # convert columns into name, length pairs
+ spec['columns'] = self._parse_keyexprs(spec['columns'])
+ return 'key', spec
+
+ # CONSTRAINT
+ m = self._re_constraint.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['table'] = \
+ self.preparer.unformat_identifiers(spec['table'])
+ spec['local'] = [c[0]
+ for c in self._parse_keyexprs(spec['local'])]
+ spec['foreign'] = [c[0]
+ for c in self._parse_keyexprs(spec['foreign'])]
+ return 'constraint', spec
+
+ # PARTITION and SUBPARTITION
+ m = self._re_partition.match(line)
+ if m:
+ # Punt!
+ return 'partition', line
+
+ # No match.
+ return (None, line)
+
+ def _parse_table_name(self, line, state):
+ """Extract the table name.
+
+ :param line: The first line of SHOW CREATE TABLE
+ """
+
+ regex, cleanup = self._pr_name
+ m = regex.match(line)
+ if m:
+ state.table_name = cleanup(m.group('name'))
+
+ def _parse_table_options(self, line, state):
+ """Build a dictionary of all reflected table-level options.
+
+ :param line: The final line of SHOW CREATE TABLE output.
+ """
+
+ options = {}
+
+ if not line or line == ')':
+ pass
+
+ else:
+ rest_of_line = line[:]
+ for regex, cleanup in self._pr_options:
+ m = regex.search(rest_of_line)
+ if not m:
+ continue
+ directive, value = m.group('directive'), m.group('val')
+ if cleanup:
+ value = cleanup(value)
+ options[directive.lower()] = value
+ rest_of_line = regex.sub('', rest_of_line)
+
+ for nope in ('auto_increment', 'data directory', 'index directory'):
+ options.pop(nope, None)
+
+ for opt, val in options.items():
+ state.table_options['%s_%s' % (self.dialect.name, opt)] = val
+
+ def _parse_column(self, line, state):
+ """Extract column details.
+
+ Falls back to a 'minimal support' variant if full parse fails.
+
+ :param line: Any column-bearing line from SHOW CREATE TABLE
+ """
+
+ spec = None
+ m = self._re_column.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['full'] = True
+ else:
+ m = self._re_column_loose.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['full'] = False
+ if not spec:
+ util.warn("Unknown column definition %r" % line)
+ return
+ if not spec['full']:
+ util.warn("Incomplete reflection of column definition %r" % line)
+
+ name, type_, args = spec['name'], spec['coltype'], spec['arg']
+
+ try:
+ col_type = self.dialect.ischema_names[type_]
+ except KeyError:
+ util.warn("Did not recognize type '%s' of column '%s'" %
+ (type_, name))
+ col_type = sqltypes.NullType
+
+ # Column type positional arguments eg. varchar(32)
+ if args is None or args == '':
+ type_args = []
+ elif args[0] == "'" and args[-1] == "'":
+ type_args = self._re_csv_str.findall(args)
+ else:
+ type_args = [int(v) for v in self._re_csv_int.findall(args)]
+
+ # Column type keyword options
+ type_kw = {}
+
+ if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
+ if type_args:
+ type_kw['fsp'] = type_args.pop(0)
+
+ for kw in ('unsigned', 'zerofill'):
+ if spec.get(kw, False):
+ type_kw[kw] = True
+ for kw in ('charset', 'collate'):
+ if spec.get(kw, False):
+ type_kw[kw] = spec[kw]
+ if issubclass(col_type, _EnumeratedValues):
+ type_args = _EnumeratedValues._strip_values(type_args)
+
+ if issubclass(col_type, SET) and '' in type_args:
+ type_kw['retrieve_as_bitwise'] = True
+
+ type_instance = col_type(*type_args, **type_kw)
+
+ col_kw = {}
+
+ # NOT NULL
+ col_kw['nullable'] = True
+ # this can be "NULL" in the case of TIMESTAMP
+ if spec.get('notnull', False) == 'NOT NULL':
+ col_kw['nullable'] = False
+
+ # AUTO_INCREMENT
+ if spec.get('autoincr', False):
+ col_kw['autoincrement'] = True
+ elif issubclass(col_type, sqltypes.Integer):
+ col_kw['autoincrement'] = False
+
+ # DEFAULT
+ default = spec.get('default', None)
+
+ if default == 'NULL':
+ # eliminates the need to deal with this later.
+ default = None
+
+ col_d = dict(name=name, type=type_instance, default=default)
+ col_d.update(col_kw)
+ state.columns.append(col_d)
+
+ def _describe_to_create(self, table_name, columns):
+ """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
+
+ DESCRIBE is a much simpler reflection and is sufficient for
+ reflecting views for runtime use. This method formats DDL
+ for columns only- keys are omitted.
+
+ :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
+ SHOW FULL COLUMNS FROM rows must be rearranged for use with
+ this function.
+ """
+
+ buffer = []
+ for row in columns:
+ (name, col_type, nullable, default, extra) = \
+ [row[i] for i in (0, 1, 2, 4, 5)]
+
+ line = [' ']
+ line.append(self.preparer.quote_identifier(name))
+ line.append(col_type)
+ if not nullable:
+ line.append('NOT NULL')
+ if default:
+ if 'auto_increment' in default:
+ pass
+ elif (col_type.startswith('timestamp') and
+ default.startswith('C')):
+ line.append('DEFAULT')
+ line.append(default)
+ elif default == 'NULL':
+ line.append('DEFAULT')
+ line.append(default)
+ else:
+ line.append('DEFAULT')
+ line.append("'%s'" % default.replace("'", "''"))
+ if extra:
+ line.append(extra)
+
+ buffer.append(' '.join(line))
+
+ return ''.join([('CREATE TABLE %s (\n' %
+ self.preparer.quote_identifier(table_name)),
+ ',\n'.join(buffer),
+ '\n) '])
+
+ def _parse_keyexprs(self, identifiers):
+ """Unpack '"col"(2),"col" ASC'-ish strings into components."""
+
+ return self._re_keyexprs.findall(identifiers)
+
+ def _prep_regexes(self):
+ """Pre-compile regular expressions."""
+
+ self._re_columns = []
+ self._pr_options = []
+
+ _final = self.preparer.final_quote
+
+ quotes = dict(zip(('iq', 'fq', 'esc_fq'),
+ [re.escape(s) for s in
+ (self.preparer.initial_quote,
+ _final,
+ self.preparer._escape_identifier(_final))]))
+
+ self._pr_name = _pr_compile(
+ r'^CREATE (?:\w+ +)?TABLE +'
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
+ self.preparer._unescape_identifier)
+
+ # `col`,`col2`(32),`col3`(15) DESC
+ #
+ # Note: ASC and DESC aren't reflected, so we'll punt...
+ self._re_keyexprs = _re_compile(
+ r'(?:'
+ r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
+ r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
+
+ # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
+ self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
+
+ # 123 or 123,456
+ self._re_csv_int = _re_compile(r'\d+')
+
+ # `colname` [type opts]
+ # (NOT NULL | NULL)
+ # DEFAULT ('value' | CURRENT_TIMESTAMP...)
+ # COMMENT 'comment'
+ # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
+ # STORAGE (DISK|MEMORY)
+ self._re_column = _re_compile(
+ r'  '
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'(?P<coltype>\w+)'
+ r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
+ r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
+ r'(?: +(?P<unsigned>UNSIGNED))?'
+ r'(?: +(?P<zerofill>ZEROFILL))?'
+ r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
+ r'(?: +COLLATE +(?P<collate>[\w_]+))?'
+ r'(?: +(?P<notnull>(?:NOT )?NULL))?'
+ r'(?: +DEFAULT +(?P<default>'
+ r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
+ r'(?: +ON UPDATE \w+)?)'
+ r'))?'
+ r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
+ r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
+ r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
+ r'(?: +STORAGE +(?P<storage>\w+))?'
+ r'(?: +(?P<extra>.*))?'
+ r',?$'
+ % quotes
+ )
+
+ # Fallback, try to parse as little as possible
+ self._re_column_loose = _re_compile(
+ r'  '
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'(?P<coltype>\w+)'
+ r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
+ r'.*?(?P<notnull>(?:NOT )NULL)?'
+ % quotes
+ )
+
+ # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
+ # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
+ # KEY_BLOCK_SIZE size | WITH PARSER name
+ self._re_key = _re_compile(
+ r'  '
+ r'(?:(?P<type>\S+) )?KEY'
+ r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
+ r'(?: +USING +(?P<using_pre>\S+))?'
+ r' +\((?P<columns>.+?)\)'
+ r'(?: +USING +(?P<using_post>\S+))?'
+ r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
+ r'(?: +WITH PARSER +(?P<parser>\S+))?'
+ r'(?: +COMMENT +(?P<comment>(\x27\x27|\x27([^\x27])*?\x27)+))?'
+ r',?$'
+ % quotes
+ )
+
+ # CONSTRAINT `name` FOREIGN KEY (`local_col`)
+ # REFERENCES `remote` (`remote_col`)
+ # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
+ # ON DELETE CASCADE ON UPDATE RESTRICT
+ #
+ # unique constraints come back as KEYs
+ kw = quotes.copy()
+ kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
+ self._re_constraint = _re_compile(
+ r'  '
+ r'CONSTRAINT +'
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'FOREIGN KEY +'
+ r'\((?P<local>[^\)]+?)\) REFERENCES +'
+ r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
+ r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
+ r'\((?P<foreign>[^\)]+?)\)'
+ r'(?: +(?P<match>MATCH \w+))?'
+ r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
+ r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
+ % kw
+ )
+
+ # PARTITION
+ #
+ # punt!
+ self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
+
+ # Table-level options (COLLATE, ENGINE, etc.)
+ # Do the string options first, since they have quoted
+ # strings we need to get rid of.
+ for option in _options_of_type_string:
+ self._add_option_string(option)
+
+ for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
+ 'AVG_ROW_LENGTH', 'CHARACTER SET',
+ 'DEFAULT CHARSET', 'CHECKSUM',
+ 'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
+ 'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
+ 'KEY_BLOCK_SIZE'):
+ self._add_option_word(option)
+
+ self._add_option_regex('UNION', r'\([^\)]+\)')
+ self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
+ self._add_option_regex(
+ 'RAID_TYPE',
+ r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
+
+ _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
+
+ def _add_option_string(self, directive):
+ regex = (r'(?P<directive>%s)%s'
+ r"'(?P<val>(?:[^']|'')*?)'(?!')" %
+ (re.escape(directive), self._optional_equals))
+ self._pr_options.append(_pr_compile(
+ regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
+ ))
+
+ def _add_option_word(self, directive):
+ regex = (r'(?P<directive>%s)%s'
+ r'(?P<val>\w+)' %
+ (re.escape(directive), self._optional_equals))
+ self._pr_options.append(_pr_compile(regex))
+
+ def _add_option_regex(self, directive, regex):
+ regex = (r'(?P<directive>%s)%s'
+ r'(?P<val>%s)' %
+ (re.escape(directive), self._optional_equals, regex))
+ self._pr_options.append(_pr_compile(regex))
+
+_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
+ 'PASSWORD', 'CONNECTION')
+
+
+def _pr_compile(regex, cleanup=None):
+ """Prepare a 2-tuple of compiled regex and callable."""
+
+ return (_re_compile(regex), cleanup)
+
+
+def _re_compile(regex):
+ """Compile a string to regex, I and UNICODE."""
+
+ return re.compile(regex, re.I | re.UNICODE)
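The ``_re_keyexprs`` pattern built in ``_prep_regexes`` is what ``_parse_keyexprs`` uses to unpack index column lists; a standalone sketch with backtick quoting substituted for the ``%(iq)s``/``%(fq)s`` placeholders::

    import re

    quotes = {'iq': re.escape('`'), 'fq': re.escape('`'),
              'esc_fq': re.escape('``')}
    re_keyexprs = re.compile(
        r'(?:'
        r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
        r'(?:\((\d+)\))?(?=\,|$))+' % quotes,
        re.I | re.UNICODE)

    # column names come back paired with optional prefix lengths
    assert re_keyexprs.findall('`col`,`col2`(32)') == [('col', ''), ('col2', '32')]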
diff --git a/app/lib/sqlalchemy/dialects/mysql/types.py b/app/lib/sqlalchemy/dialects/mysql/types.py
new file mode 100644
index 0000000..cf80d79
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/types.py
@@ -0,0 +1,766 @@
+# mysql/types.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import datetime
+from ... import exc, util
+from ... import types as sqltypes
+
+
+class _NumericType(object):
+ """Base for MySQL numeric types.
+
+ This is the base both for NUMERIC as well as INTEGER, hence
+ it's a mixin.
+
+ """
+
+ def __init__(self, unsigned=False, zerofill=False, **kw):
+ self.unsigned = unsigned
+ self.zerofill = zerofill
+ super(_NumericType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self,
+ to_inspect=[_NumericType, sqltypes.Numeric])
+
+
+class _FloatType(_NumericType, sqltypes.Float):
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ if isinstance(self, (REAL, DOUBLE)) and \
+ (
+ (precision is None and scale is not None) or
+ (precision is not None and scale is None)
+ ):
+ raise exc.ArgumentError(
+ "You must specify both precision and scale or omit "
+ "both altogether.")
+ super(_FloatType, self).__init__(
+ precision=precision, asdecimal=asdecimal, **kw)
+ self.scale = scale
+
+ def __repr__(self):
+ return util.generic_repr(self, to_inspect=[_FloatType,
+ _NumericType,
+ sqltypes.Float])
+
+
+class _IntegerType(_NumericType, sqltypes.Integer):
+ def __init__(self, display_width=None, **kw):
+ self.display_width = display_width
+ super(_IntegerType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self, to_inspect=[_IntegerType,
+ _NumericType,
+ sqltypes.Integer])
+
+
+class _StringType(sqltypes.String):
+ """Base for MySQL string types."""
+
+ def __init__(self, charset=None, collation=None,
+ ascii=False, binary=False, unicode=False,
+ national=False, **kw):
+ self.charset = charset
+
+ # allow collate= or collation=
+ kw.setdefault('collation', kw.pop('collate', collation))
+
+ self.ascii = ascii
+ self.unicode = unicode
+ self.binary = binary
+ self.national = national
+ super(_StringType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self,
+ to_inspect=[_StringType, sqltypes.String])
+
+
+class _MatchType(sqltypes.Float, sqltypes.MatchType):
+ def __init__(self, **kw):
+ # TODO: float arguments?
+ sqltypes.Float.__init__(self)
+ sqltypes.MatchType.__init__(self)
+
+
+
+class NUMERIC(_NumericType, sqltypes.NUMERIC):
+ """MySQL NUMERIC type."""
+
+ __visit_name__ = 'NUMERIC'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a NUMERIC.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(NUMERIC, self).__init__(precision=precision,
+ scale=scale, asdecimal=asdecimal, **kw)
+
+
+class DECIMAL(_NumericType, sqltypes.DECIMAL):
+ """MySQL DECIMAL type."""
+
+ __visit_name__ = 'DECIMAL'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a DECIMAL.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(DECIMAL, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class DOUBLE(_FloatType):
+ """MySQL DOUBLE type."""
+
+ __visit_name__ = 'DOUBLE'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a DOUBLE.
+
+ .. note::
+
+ The :class:`.DOUBLE` type by default converts from float
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(DOUBLE, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class REAL(_FloatType, sqltypes.REAL):
+ """MySQL REAL type."""
+
+ __visit_name__ = 'REAL'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a REAL.
+
+ .. note::
+
+ The :class:`.REAL` type by default converts from float
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(REAL, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class FLOAT(_FloatType, sqltypes.FLOAT):
+ """MySQL FLOAT type."""
+
+ __visit_name__ = 'FLOAT'
+
+ def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
+ """Construct a FLOAT.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(FLOAT, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+ def bind_processor(self, dialect):
+ return None
+
+
+class INTEGER(_IntegerType, sqltypes.INTEGER):
+ """MySQL INTEGER type."""
+
+ __visit_name__ = 'INTEGER'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct an INTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(INTEGER, self).__init__(display_width=display_width, **kw)
+
+
+class BIGINT(_IntegerType, sqltypes.BIGINT):
+ """MySQL BIGINTEGER type."""
+
+ __visit_name__ = 'BIGINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a BIGINTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(BIGINT, self).__init__(display_width=display_width, **kw)
+
+
+class MEDIUMINT(_IntegerType):
+ """MySQL MEDIUMINTEGER type."""
+
+ __visit_name__ = 'MEDIUMINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a MEDIUMINTEGER
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
+
+
+class TINYINT(_IntegerType):
+ """MySQL TINYINT type."""
+
+ __visit_name__ = 'TINYINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a TINYINT.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(TINYINT, self).__init__(display_width=display_width, **kw)
+
+
+class SMALLINT(_IntegerType, sqltypes.SMALLINT):
+ """MySQL SMALLINTEGER type."""
+
+ __visit_name__ = 'SMALLINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a SMALLINTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not affect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(SMALLINT, self).__init__(display_width=display_width, **kw)
+
+
+class BIT(sqltypes.TypeEngine):
+ """MySQL BIT type.
+
+ This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
+ for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a
+ MSTinyInteger() type.
+
+ """
+
+ __visit_name__ = 'BIT'
+
+ def __init__(self, length=None):
+ """Construct a BIT.
+
+ :param length: Optional, number of bits.
+
+ """
+ self.length = length
+
+ def result_processor(self, dialect, coltype):
+ """Convert a MySQL's 64 bit, variable length binary string to a long.
+
+ TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
+ already do this, so this logic should be moved to those dialects.
+
+ """
+
+ def process(value):
+ if value is not None:
+ v = 0
+ for i in value:
+ if not isinstance(i, int):
+ i = ord(i) # convert byte to int on Python 2
+ v = v << 8 | i
+ return v
+ return value
+ return process
+
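The BIT result processor above folds MySQL's variable-length byte string into an integer, most significant byte first; a driver-free sketch::

    def bits_to_int(value):
        v = 0
        for i in value:
            if not isinstance(i, int):
                i = ord(i)  # bytes iterate as 1-char strings on Python 2
            v = v << 8 | i
        return v

    assert bits_to_int(b'\x02\x01') == 513  # 2 * 256 + 1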
+
+class TIME(sqltypes.TIME):
+ """MySQL TIME type. """
+
+ __visit_name__ = 'TIME'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL TIME type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the TIME type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8 The MySQL-specific TIME
+ type as well as fractional seconds support.
+
+ """
+ super(TIME, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+ def result_processor(self, dialect, coltype):
+ time = datetime.time
+
+ def process(value):
+ # convert from a timedelta value
+ if value is not None:
+ microseconds = value.microseconds
+ seconds = value.seconds
+ minutes = seconds // 60
+ return time(minutes // 60,
+ minutes % 60,
+ seconds - minutes * 60,
+ microsecond=microseconds)
+ else:
+ return None
+ return process
+
+
+class TIMESTAMP(sqltypes.TIMESTAMP):
+ """MySQL TIMESTAMP type.
+
+ """
+
+ __visit_name__ = 'TIMESTAMP'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL TIMESTAMP type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6.4 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the TIMESTAMP type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
+ with fractional seconds support.
+
+ """
+ super(TIMESTAMP, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+
+class DATETIME(sqltypes.DATETIME):
+ """MySQL DATETIME type.
+
+ """
+
+ __visit_name__ = 'DATETIME'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL DATETIME type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6.4 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the DATETIME type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
+ with fractional seconds support.
+
+ """
+ super(DATETIME, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+
+class YEAR(sqltypes.TypeEngine):
+ """MySQL YEAR type, for single byte storage of years 1901-2155."""
+
+ __visit_name__ = 'YEAR'
+
+ def __init__(self, display_width=None):
+ self.display_width = display_width
+
+
+class TEXT(_StringType, sqltypes.TEXT):
+ """MySQL TEXT type, for text up to 2^16 characters."""
+
+ __visit_name__ = 'TEXT'
+
+ def __init__(self, length=None, **kw):
+ """Construct a TEXT.
+
+ :param length: Optional, if provided the server may optimize storage
+ by substituting the smallest TEXT type sufficient to store
+ ``length`` characters.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
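+        An illustrative column combining a few of these options::
+
+            Column('body', TEXT(charset='utf8', collation='utf8_bin'))
+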
+ """
+ super(TEXT, self).__init__(length=length, **kw)
+
+
+class TINYTEXT(_StringType):
+ """MySQL TINYTEXT type, for text up to 2^8 characters."""
+
+ __visit_name__ = 'TINYTEXT'
+
+ def __init__(self, **kwargs):
+ """Construct a TINYTEXT.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(TINYTEXT, self).__init__(**kwargs)
+
+
+class MEDIUMTEXT(_StringType):
+ """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
+
+ __visit_name__ = 'MEDIUMTEXT'
+
+ def __init__(self, **kwargs):
+ """Construct a MEDIUMTEXT.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(MEDIUMTEXT, self).__init__(**kwargs)
+
+
+class LONGTEXT(_StringType):
+ """MySQL LONGTEXT type, for text up to 2^32 characters."""
+
+ __visit_name__ = 'LONGTEXT'
+
+ def __init__(self, **kwargs):
+ """Construct a LONGTEXT.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(LONGTEXT, self).__init__(**kwargs)
+
+
+class VARCHAR(_StringType, sqltypes.VARCHAR):
+ """MySQL VARCHAR type, for variable-length character data."""
+
+ __visit_name__ = 'VARCHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct a VARCHAR.
+
+ :param charset: Optional, a column-level character set for this string
+          value. Takes precedence over 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+          value. Takes precedence over 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param national: Optional. If true, use the server's configured
+ national character set.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ """
+ super(VARCHAR, self).__init__(length=length, **kwargs)
+
+
+class CHAR(_StringType, sqltypes.CHAR):
+ """MySQL CHAR type, for fixed-length character data."""
+
+ __visit_name__ = 'CHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct a CHAR.
+
+ :param length: Maximum data length, in characters.
+
+ :param binary: Optional, use the default binary collation for the
+ national character set. This does not affect the type of data
+ stored, use a BINARY type for binary data.
+
+ :param collation: Optional, request a particular collation. Must be
+ compatible with the national character set.
+
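+        For example, a hypothetical fixed-width column::
+
+            Column('country_code', CHAR(2, binary=True))
+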
+ """
+ super(CHAR, self).__init__(length=length, **kwargs)
+
+ @classmethod
+    def _adapt_string_for_cast(cls, type_):
+ # copy the given string type into a CHAR
+ # for the purposes of rendering a CAST expression
+ type_ = sqltypes.to_instance(type_)
+ if isinstance(type_, sqltypes.CHAR):
+ return type_
+ elif isinstance(type_, _StringType):
+ return CHAR(
+ length=type_.length,
+ charset=type_.charset,
+ collation=type_.collation,
+ ascii=type_.ascii,
+ binary=type_.binary,
+ unicode=type_.unicode,
+ national=False # not supported in CAST
+ )
+ else:
+ return CHAR(length=type_.length)
+
+
+class NVARCHAR(_StringType, sqltypes.NVARCHAR):
+ """MySQL NVARCHAR type.
+
+ For variable-length character data in the server's configured national
+ character set.
+ """
+
+ __visit_name__ = 'NVARCHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct an NVARCHAR.
+
+ :param length: Maximum data length, in characters.
+
+ :param binary: Optional, use the default binary collation for the
+ national character set. This does not affect the type of data
+ stored, use a BINARY type for binary data.
+
+ :param collation: Optional, request a particular collation. Must be
+ compatible with the national character set.
+
+ """
+ kwargs['national'] = True
+ super(NVARCHAR, self).__init__(length=length, **kwargs)
+
+
+class NCHAR(_StringType, sqltypes.NCHAR):
+ """MySQL NCHAR type.
+
+ For fixed-length character data in the server's configured national
+ character set.
+ """
+
+ __visit_name__ = 'NCHAR'
+
+ def __init__(self, length=None, **kwargs):
+ """Construct an NCHAR.
+
+ :param length: Maximum data length, in characters.
+
+ :param binary: Optional, use the default binary collation for the
+ national character set. This does not affect the type of data
+ stored, use a BINARY type for binary data.
+
+ :param collation: Optional, request a particular collation. Must be
+ compatible with the national character set.
+
+ """
+ kwargs['national'] = True
+ super(NCHAR, self).__init__(length=length, **kwargs)
+
+
+class TINYBLOB(sqltypes._Binary):
+ """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
+
+ __visit_name__ = 'TINYBLOB'
+
+
+class MEDIUMBLOB(sqltypes._Binary):
+ """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
+
+ __visit_name__ = 'MEDIUMBLOB'
+
+
+class LONGBLOB(sqltypes._Binary):
+ """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
+
+ __visit_name__ = 'LONGBLOB'
diff --git a/app/lib/sqlalchemy/dialects/mysql/zxjdbc.py b/app/lib/sqlalchemy/dialects/mysql/zxjdbc.py
new file mode 100644
index 0000000..9c92be4
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/mysql/zxjdbc.py
@@ -0,0 +1,117 @@
+# mysql/zxjdbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+.. dialect:: mysql+zxjdbc
+ :name: zxjdbc for Jython
+ :dbapi: zxjdbc
+    :connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/\
+<database>
+ :driverurl: http://dev.mysql.com/downloads/connector/j/
+
+ .. note:: Jython is not supported by current versions of SQLAlchemy. The
+ zxjdbc dialect should be considered as experimental.
+
+Character Sets
+--------------
+
+SQLAlchemy zxjdbc dialects pass unicode straight through to the
+zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
+MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
+``characterEncoding`` connection property to ``UTF-8``. It may be
+overridden via a ``create_engine`` URL parameter.
+
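+For example, with hypothetical credentials::
+
+    create_engine(
+        "mysql+zxjdbc://user:pass@host/db?characterEncoding=latin1")
+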
+"""
+import re
+
+from ... import types as sqltypes, util
+from ...connectors.zxJDBC import ZxJDBCConnector
+from .base import BIT, MySQLDialect, MySQLExecutionContext
+
+
+class _ZxJDBCBit(BIT):
+ def result_processor(self, dialect, coltype):
+ """Converts boolean or byte arrays from MySQL Connector/J to longs."""
+ def process(value):
+ if value is None:
+ return value
+ if isinstance(value, bool):
+ return int(value)
+ v = 0
+ for i in value:
+ v = v << 8 | (i & 0xff)
+ value = v
+ return value
+ return process
+
+
+class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
+ def get_lastrowid(self):
+ cursor = self.create_cursor()
+ cursor.execute("SELECT LAST_INSERT_ID()")
+ lastrowid = cursor.fetchone()[0]
+ cursor.close()
+ return lastrowid
+
+
+class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
+ jdbc_db_name = 'mysql'
+ jdbc_driver_name = 'com.mysql.jdbc.Driver'
+
+ execution_ctx_cls = MySQLExecutionContext_zxjdbc
+
+ colspecs = util.update_copy(
+ MySQLDialect.colspecs,
+ {
+ sqltypes.Time: sqltypes.Time,
+ BIT: _ZxJDBCBit
+ }
+ )
+
+ def _detect_charset(self, connection):
+ """Sniff out the character set in use for connection results."""
+ # Prefer 'character_set_results' for the current connection over the
+ # value in the driver. SET NAMES or individual variable SETs will
+ # change the charset without updating the driver's view of the world.
+ #
+ # If it's decided that issuing that sort of SQL leaves you SOL, then
+ # this can prefer the driver value.
+ rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
+ opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
+ for key in ('character_set_connection', 'character_set'):
+ if opts.get(key, None):
+ return opts[key]
+
+ util.warn("Could not detect the connection character set. "
+ "Assuming latin1.")
+ return 'latin1'
+
+ def _driver_kwargs(self):
+ """return kw arg dict to be sent to connect()."""
+ return dict(characterEncoding='UTF-8', yearIsDateType='false')
+
+ def _extract_error_code(self, exception):
+ # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
+ # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
+        m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args))
+        if m:
+            return int(m.group(1))
+
+ def _get_server_version_info(self, connection):
+ dbapi_con = connection.connection
+ version = []
+ r = re.compile(r'[.\-]')
+ for n in r.split(dbapi_con.dbversion):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
+
+dialect = MySQLDialect_zxjdbc
diff --git a/app/lib/sqlalchemy/dialects/oracle/__init__.py b/app/lib/sqlalchemy/dialects/oracle/__init__.py
new file mode 100644
index 0000000..210fe50
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/oracle/__init__.py
@@ -0,0 +1,24 @@
+# oracle/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc
+
+base.dialect = cx_oracle.dialect
+
+from sqlalchemy.dialects.oracle.base import \
+ VARCHAR, NVARCHAR, CHAR, DATE, NUMBER,\
+ BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\
+ FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\
+    VARCHAR2, NVARCHAR2, ROWID
+
+
+__all__ = (
+ 'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'NUMBER',
+ 'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
+ 'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
+ 'VARCHAR2', 'NVARCHAR2', 'ROWID'
+)
diff --git a/app/lib/sqlalchemy/dialects/oracle/base.py b/app/lib/sqlalchemy/dialects/oracle/base.py
new file mode 100644
index 0000000..7c23e9c
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/oracle/base.py
@@ -0,0 +1,1602 @@
+# oracle/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: oracle
+ :name: Oracle
+
+    Oracle versions 8 through current (11g at the time of this writing) are
+    supported.
+
+Connect Arguments
+-----------------
+
+The dialect supports several :func:`~sqlalchemy.create_engine()` arguments
+which affect the behavior of the dialect regardless of driver in use.
+
+* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8).
+ Defaults to ``True``. If ``False``, Oracle-8 compatible constructs are used
+ for joins.
+
+* ``optimize_limits`` - defaults to ``False``. see the section on
+ LIMIT/OFFSET.
+
+* ``use_binds_for_limits`` - defaults to ``True``. see the section on
+ LIMIT/OFFSET.
+
+Auto Increment Behavior
+-----------------------
+
+SQLAlchemy Table objects which include integer primary keys are usually
+assumed to have "autoincrementing" behavior, meaning they can generate their
+own primary key values upon INSERT. Since Oracle has no "autoincrement"
+feature, SQLAlchemy relies upon sequences to produce these values. With the
+Oracle dialect, *a sequence must always be explicitly specified to enable
+autoincrement*. This is divergent from the majority of documentation
+examples which assume the usage of an autoincrement-capable database. To
+specify sequences, use the sqlalchemy.schema.Sequence object which is passed
+to a Column construct::
+
+ t = Table('mytable', metadata,
+ Column('id', Integer, Sequence('id_seq'), primary_key=True),
+ Column(...), ...
+ )
+
+This step is also required when using table reflection, i.e. autoload=True::
+
+ t = Table('mytable', metadata,
+ Column('id', Integer, Sequence('id_seq'), primary_key=True),
+ autoload=True
+ )
+
+Identifier Casing
+-----------------
+
+In Oracle, the data dictionary represents all case insensitive identifier
+names using UPPERCASE text. SQLAlchemy on the other hand considers an
+all-lower case identifier name to be case insensitive. The Oracle dialect
+converts all case insensitive identifiers to and from those two formats during
+schema level communication, such as reflection of tables and indexes. Using
+an UPPERCASE name on the SQLAlchemy side indicates a case sensitive
+identifier, and SQLAlchemy will quote the name - this will cause mismatches
+against data dictionary data received from Oracle, so unless identifier names
+have been truly created as case sensitive (i.e. using quoted names), all
+lowercase names should be used on the SQLAlchemy side.
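+
+As an illustrative sketch (assuming an existing ``engine`` and
+``metadata``), an all-lowercase name is the case insensitive spelling of
+``MY_TABLE`` on the Oracle side::
+
+    # reflected case insensitively; corresponds to MY_TABLE in Oracle
+    t = Table('my_table', metadata, autoload=True, autoload_with=engine)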
+
+
+LIMIT/OFFSET Support
+--------------------
+
+Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
+a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
+is taken from
+http://www.oracle.com/technetwork/issue-archive/2006/06-sep/o56asktom-086197.html .
+
+There are two options which affect its behavior:
+
+* the "FIRST ROWS()" optimization keyword is not used by default. To enable
+ the usage of this optimization directive, specify ``optimize_limits=True``
+ to :func:`.create_engine`.
+* the values passed for the limit/offset are sent as bound parameters. Some
+ users have observed that Oracle produces a poor query plan when the values
+ are sent as binds and not rendered literally. To render the limit/offset
+ values literally within the SQL statement, specify
+  ``use_binds_for_limits=False`` to :func:`.create_engine`, as in the
+  sketch below.
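+
+A hypothetical engine combining both options::
+
+    engine = create_engine(
+        "oracle://scott:tiger@dsn",
+        optimize_limits=True,
+        use_binds_for_limits=False)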
+
+Some users have reported better performance when the entirely different
+approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to
+provide LIMIT/OFFSET (note that the majority of users don't observe this).
+To suit this case the method used for LIMIT/OFFSET can be replaced entirely.
+See the recipe at
+http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
+which installs a select compiler that overrides the generation of limit/offset
+with a window function.
+
+.. _oracle_returning:
+
+RETURNING Support
+-----------------
+
+The Oracle database supports a limited form of RETURNING, in order to retrieve
+result sets of matched rows from INSERT, UPDATE and DELETE statements.
+Oracle's RETURNING..INTO syntax only supports one row being returned, as it
+relies upon OUT parameters in order to function. In addition, supported
+DBAPIs have further limitations (see :ref:`cx_oracle_returning`).
+
+SQLAlchemy's "implicit returning" feature, which employs RETURNING within an
+INSERT and sometimes an UPDATE statement in order to fetch newly generated
+primary key values and other SQL defaults and expressions, is normally enabled
+on the Oracle backend. By default, "implicit returning" typically only
+fetches the value of a single ``nextval(some_seq)`` expression embedded into
+an INSERT in order to increment a sequence within an INSERT statement and get
+the value back at the same time. To disable this feature across the board,
+specify ``implicit_returning=False`` to :func:`.create_engine`::
+
+ engine = create_engine("oracle://scott:tiger@dsn",
+ implicit_returning=False)
+
+Implicit returning can also be disabled on a table-by-table basis as a table
+option::
+
+ # Core Table
+ my_table = Table("my_table", metadata, ..., implicit_returning=False)
+
+
+ # declarative
+ class MyClass(Base):
+ __tablename__ = 'my_table'
+ __table_args__ = {"implicit_returning": False}
+
+.. seealso::
+
+ :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on
+ implicit returning.
+
+ON UPDATE CASCADE
+-----------------
+
+Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based
+solution is available at
+http://asktom.oracle.com/tkyte/update_cascade/index.html .
+
+When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
+cascading updates - specify ForeignKey objects using the
+"deferrable=True, initially='deferred'" keyword arguments,
+and specify "passive_updates=False" on each relationship().
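+
+A minimal declarative sketch of that configuration (names are
+hypothetical)::
+
+    class Parent(Base):
+        __tablename__ = 'parent'
+        id = Column(Integer, primary_key=True)
+        children = relationship("Child", passive_updates=False)
+
+    class Child(Base):
+        __tablename__ = 'child'
+        id = Column(Integer, primary_key=True)
+        parent_id = Column(
+            Integer,
+            ForeignKey('parent.id', deferrable=True, initially='deferred'))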
+
+Oracle 8 Compatibility
+----------------------
+
+When Oracle 8 is detected, the dialect internally configures itself to the
+following behaviors:
+
+* the use_ansi flag is set to False. This has the effect of converting all
+ JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
+ makes use of Oracle's (+) operator.
+
+* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
+ the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are
+  issued instead. This is because these types don't seem to work correctly on
+ Oracle 8 even though they are available. The
+ :class:`~sqlalchemy.types.NVARCHAR` and
+ :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate
+ NVARCHAR2 and NCLOB.
+
+* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
+ encodes all Python unicode objects to "string" before passing in as bind
+ parameters.
+
+Synonym/DBLINK Reflection
+-------------------------
+
+When using reflection with Table objects, the dialect can optionally search
+for tables indicated by synonyms, either in local or remote schemas or
+accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
+a keyword argument to the :class:`.Table` construct::
+
+ some_table = Table('some_table', autoload=True,
+ autoload_with=some_engine,
+ oracle_resolve_synonyms=True)
+
+When this flag is set, the given name (such as ``some_table`` above) will
+be searched not just in the ``ALL_TABLES`` view, but also within the
+``ALL_SYNONYMS`` view to see if this name is actually a synonym to another
+name. If the synonym is located and refers to a DBLINK, the oracle dialect
+knows how to locate the table's information using DBLINK syntax (e.g.
+``@dblink``).
+
+``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
+accepted, including methods such as :meth:`.MetaData.reflect` and
+:meth:`.Inspector.get_columns`.
+
+If synonyms are not in use, this flag should be left disabled.
+
+Table names with SYSTEM/SYSAUX tablespaces
+-------------------------------------------
+
+The :meth:`.Inspector.get_table_names` and
+:meth:`.Inspector.get_temp_table_names`
+methods each return a list of table names for the current engine. These methods
+are also part of the reflection which occurs within an operation such as
+:meth:`.MetaData.reflect`. By default, these operations exclude the ``SYSTEM``
+and ``SYSAUX`` tablespaces from the operation. In order to change this, the
+default list of tablespaces excluded can be changed at the engine level using
+the ``exclude_tablespaces`` parameter::
+
+ # exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM
+ e = create_engine(
+ "oracle://scott:tiger@xe",
+ exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"])
+
+.. versionadded:: 1.1
+
+DateTime Compatibility
+----------------------
+
+Oracle has no datatype known as ``DATETIME``; it instead has only ``DATE``,
+which can actually store a date and time value. For this reason, the Oracle
+dialect provides a type :class:`.oracle.DATE` which is a subclass of
+:class:`.DateTime`. This type has no special behavior, and is only
+present as a "marker" for this type; additionally, when a database column
+is reflected and the type is reported as ``DATE``, the time-supporting
+:class:`.oracle.DATE` type is used.
+
+.. versionchanged:: 0.9.4 Added :class:`.oracle.DATE` to subclass
+ :class:`.DateTime`. This is a change as previous versions
+ would reflect a ``DATE`` column as :class:`.types.DATE`, which subclasses
+ :class:`.Date`. The only significance here is for schemes that are
+ examining the type of column for use in special Python translations or
+ for migrating schemas to other database backends.
+
+.. _oracle_table_options:
+
+Oracle Table Options
+-------------------------
+
+The CREATE TABLE phrase supports the following options with Oracle
+in conjunction with the :class:`.Table` construct:
+
+
+* ``ON COMMIT``::
+
+ Table(
+ "some_table", metadata, ...,
+ prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS')
+
+.. versionadded:: 1.0.0
+
+* ``COMPRESS``::
+
+ Table('mytable', metadata, Column('data', String(32)),
+ oracle_compress=True)
+
+ Table('mytable', metadata, Column('data', String(32)),
+ oracle_compress=6)
+
+ The ``oracle_compress`` parameter accepts either an integer compression
+ level, or ``True`` to use the default compression level.
+
+.. versionadded:: 1.0.0
+
+.. _oracle_index_options:
+
+Oracle Specific Index Options
+-----------------------------
+
+Bitmap Indexes
+~~~~~~~~~~~~~~
+
+You can specify the ``oracle_bitmap`` parameter to create a bitmap index
+instead of a B-tree index::
+
+ Index('my_index', my_table.c.data, oracle_bitmap=True)
+
+Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not
+check for such limitations, only the database will.
+
+.. versionadded:: 1.0.0
+
+Index compression
+~~~~~~~~~~~~~~~~~
+
+Oracle has a more efficient storage mode for indexes containing lots of
+repeated values. Use the ``oracle_compress`` parameter to turn on key
+compression::
+
+ Index('my_index', my_table.c.data, oracle_compress=True)
+
+ Index('my_index', my_table.c.data1, my_table.c.data2, unique=True,
+ oracle_compress=1)
+
+The ``oracle_compress`` parameter accepts either an integer specifying the
+number of prefix columns to compress, or ``True`` to use the default (all
+columns for non-unique indexes, all but the last column for unique indexes).
+
+.. versionadded:: 1.0.0
+
+"""
+
+import re
+
+from sqlalchemy import util, sql
+from sqlalchemy.engine import default, reflection
+from sqlalchemy.sql import compiler, visitors, expression, util as sql_util
+from sqlalchemy.sql import operators as sql_operators
+from sqlalchemy.sql.elements import quoted_name
+from sqlalchemy import types as sqltypes, schema as sa_schema
+from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \
+ BLOB, CLOB, TIMESTAMP, FLOAT
+
+RESERVED_WORDS = \
+ set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '
+ 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '
+ 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE '
+ 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE '
+ 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES '
+ 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS '
+ 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER '
+ 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR '
+ 'DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL'.split())
+
+NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER '
+ 'CURRENT_TIME CURRENT_TIMESTAMP'.split())
+
+
+class RAW(sqltypes._Binary):
+ __visit_name__ = 'RAW'
+OracleRaw = RAW
+
+
+class NCLOB(sqltypes.Text):
+ __visit_name__ = 'NCLOB'
+
+
+class VARCHAR2(VARCHAR):
+ __visit_name__ = 'VARCHAR2'
+
+NVARCHAR2 = NVARCHAR
+
+
+class NUMBER(sqltypes.Numeric, sqltypes.Integer):
+ __visit_name__ = 'NUMBER'
+
+ def __init__(self, precision=None, scale=None, asdecimal=None):
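+        # a NUMBER with a positive scale holds fractional values, so
+        # default to returning Decimal objects only in that case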
+ if asdecimal is None:
+ asdecimal = bool(scale and scale > 0)
+
+ super(NUMBER, self).__init__(
+ precision=precision, scale=scale, asdecimal=asdecimal)
+
+ def adapt(self, impltype):
+ ret = super(NUMBER, self).adapt(impltype)
+ # leave a hint for the DBAPI handler
+ ret._is_oracle_number = True
+ return ret
+
+ @property
+ def _type_affinity(self):
+ if bool(self.scale and self.scale > 0):
+ return sqltypes.Numeric
+ else:
+ return sqltypes.Integer
+
+
+class DOUBLE_PRECISION(sqltypes.Numeric):
+ __visit_name__ = 'DOUBLE_PRECISION'
+
+ def __init__(self, precision=None, scale=None, asdecimal=None):
+ if asdecimal is None:
+ asdecimal = False
+
+ super(DOUBLE_PRECISION, self).__init__(
+ precision=precision, scale=scale, asdecimal=asdecimal)
+
+
+class BFILE(sqltypes.LargeBinary):
+ __visit_name__ = 'BFILE'
+
+
+class LONG(sqltypes.Text):
+ __visit_name__ = 'LONG'
+
+
+class DATE(sqltypes.DateTime):
+ """Provide the oracle DATE type.
+
+ This type has no special Python behavior, except that it subclasses
+ :class:`.types.DateTime`; this is to suit the fact that the Oracle
+ ``DATE`` type supports a time value.
+
+ .. versionadded:: 0.9.4
+
+ """
+ __visit_name__ = 'DATE'
+
+ def _compare_type_affinity(self, other):
+ return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
+
+
+class INTERVAL(sqltypes.TypeEngine):
+ __visit_name__ = 'INTERVAL'
+
+ def __init__(self,
+ day_precision=None,
+ second_precision=None):
+ """Construct an INTERVAL.
+
+ Note that only DAY TO SECOND intervals are currently supported.
+ This is due to a lack of support for YEAR TO MONTH intervals
+ within available DBAPIs (cx_oracle and zxjdbc).
+
+ :param day_precision: the day precision value. this is the number of
+ digits to store for the day field. Defaults to "2"
+ :param second_precision: the second precision value. this is the
+ number of digits to store for the fractional seconds field.
+ Defaults to "6".
+
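+        An illustrative column sketch::
+
+            Column('duration', INTERVAL(day_precision=2,
+                                        second_precision=6))
+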
+ """
+ self.day_precision = day_precision
+ self.second_precision = second_precision
+
+ @classmethod
+ def _adapt_from_generic_interval(cls, interval):
+ return INTERVAL(day_precision=interval.day_precision,
+ second_precision=interval.second_precision)
+
+ @property
+ def _type_affinity(self):
+ return sqltypes.Interval
+
+
+class ROWID(sqltypes.TypeEngine):
+ """Oracle ROWID type.
+
+ When used in a cast() or similar, generates ROWID.
+
+ """
+ __visit_name__ = 'ROWID'
+
+
+class _OracleBoolean(sqltypes.Boolean):
+ def get_dbapi_type(self, dbapi):
+ return dbapi.NUMBER
+
+colspecs = {
+ sqltypes.Boolean: _OracleBoolean,
+ sqltypes.Interval: INTERVAL,
+ sqltypes.DateTime: DATE
+}
+
+ischema_names = {
+ 'VARCHAR2': VARCHAR,
+ 'NVARCHAR2': NVARCHAR,
+ 'CHAR': CHAR,
+ 'DATE': DATE,
+ 'NUMBER': NUMBER,
+ 'BLOB': BLOB,
+ 'BFILE': BFILE,
+ 'CLOB': CLOB,
+ 'NCLOB': NCLOB,
+ 'TIMESTAMP': TIMESTAMP,
+ 'TIMESTAMP WITH TIME ZONE': TIMESTAMP,
+ 'INTERVAL DAY TO SECOND': INTERVAL,
+ 'RAW': RAW,
+ 'FLOAT': FLOAT,
+ 'DOUBLE PRECISION': DOUBLE_PRECISION,
+ 'LONG': LONG,
+}
+
+
+class OracleTypeCompiler(compiler.GenericTypeCompiler):
+ # Note:
+ # Oracle DATE == DATETIME
+ # Oracle does not allow milliseconds in DATE
+ # Oracle does not support TIME columns
+
+ def visit_datetime(self, type_, **kw):
+ return self.visit_DATE(type_, **kw)
+
+ def visit_float(self, type_, **kw):
+ return self.visit_FLOAT(type_, **kw)
+
+ def visit_unicode(self, type_, **kw):
+ if self.dialect._supports_nchar:
+ return self.visit_NVARCHAR2(type_, **kw)
+ else:
+ return self.visit_VARCHAR2(type_, **kw)
+
+ def visit_INTERVAL(self, type_, **kw):
+ return "INTERVAL DAY%s TO SECOND%s" % (
+ type_.day_precision is not None and
+ "(%d)" % type_.day_precision or
+ "",
+ type_.second_precision is not None and
+ "(%d)" % type_.second_precision or
+ "",
+ )
+
+ def visit_LONG(self, type_, **kw):
+ return "LONG"
+
+ def visit_TIMESTAMP(self, type_, **kw):
+ if type_.timezone:
+ return "TIMESTAMP WITH TIME ZONE"
+ else:
+ return "TIMESTAMP"
+
+ def visit_DOUBLE_PRECISION(self, type_, **kw):
+ return self._generate_numeric(type_, "DOUBLE PRECISION", **kw)
+
+ def visit_NUMBER(self, type_, **kw):
+ return self._generate_numeric(type_, "NUMBER", **kw)
+
+ def _generate_numeric(self, type_, name, precision=None, scale=None, **kw):
+ if precision is None:
+ precision = type_.precision
+
+ if scale is None:
+ scale = getattr(type_, 'scale', None)
+
+ if precision is None:
+ return name
+ elif scale is None:
+ n = "%(name)s(%(precision)s)"
+ return n % {'name': name, 'precision': precision}
+ else:
+ n = "%(name)s(%(precision)s, %(scale)s)"
+ return n % {'name': name, 'precision': precision, 'scale': scale}
+
+ def visit_string(self, type_, **kw):
+ return self.visit_VARCHAR2(type_, **kw)
+
+ def visit_VARCHAR2(self, type_, **kw):
+ return self._visit_varchar(type_, '', '2')
+
+ def visit_NVARCHAR2(self, type_, **kw):
+ return self._visit_varchar(type_, 'N', '2')
+ visit_NVARCHAR = visit_NVARCHAR2
+
+ def visit_VARCHAR(self, type_, **kw):
+ return self._visit_varchar(type_, '', '')
+
+ def _visit_varchar(self, type_, n, num):
+ if not type_.length:
+ return "%(n)sVARCHAR%(two)s" % {'two': num, 'n': n}
+ elif not n and self.dialect._supports_char_length:
+ varchar = "VARCHAR%(two)s(%(length)s CHAR)"
+ return varchar % {'length': type_.length, 'two': num}
+ else:
+ varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
+ return varchar % {'length': type_.length, 'two': num, 'n': n}
+
+ def visit_text(self, type_, **kw):
+ return self.visit_CLOB(type_, **kw)
+
+ def visit_unicode_text(self, type_, **kw):
+ if self.dialect._supports_nchar:
+ return self.visit_NCLOB(type_, **kw)
+ else:
+ return self.visit_CLOB(type_, **kw)
+
+ def visit_large_binary(self, type_, **kw):
+ return self.visit_BLOB(type_, **kw)
+
+ def visit_big_integer(self, type_, **kw):
+ return self.visit_NUMBER(type_, precision=19, **kw)
+
+ def visit_boolean(self, type_, **kw):
+ return self.visit_SMALLINT(type_, **kw)
+
+ def visit_RAW(self, type_, **kw):
+ if type_.length:
+ return "RAW(%(length)s)" % {'length': type_.length}
+ else:
+ return "RAW"
+
+ def visit_ROWID(self, type_, **kw):
+ return "ROWID"
+
+
+class OracleCompiler(compiler.SQLCompiler):
+ """Oracle compiler modifies the lexical structure of Select
+ statements to work under non-ANSI configured Oracle databases, if
+ the use_ansi flag is False.
+ """
+
+ compound_keywords = util.update_copy(
+ compiler.SQLCompiler.compound_keywords,
+ {
+ expression.CompoundSelect.EXCEPT: 'MINUS'
+ }
+ )
+
+ def __init__(self, *args, **kwargs):
+ self.__wheres = {}
+ self._quoted_bind_names = {}
+ super(OracleCompiler, self).__init__(*args, **kwargs)
+
+ def visit_mod_binary(self, binary, operator, **kw):
+ return "mod(%s, %s)" % (self.process(binary.left, **kw),
+ self.process(binary.right, **kw))
+
+ def visit_now_func(self, fn, **kw):
+ return "CURRENT_TIMESTAMP"
+
+ def visit_char_length_func(self, fn, **kw):
+ return "LENGTH" + self.function_argspec(fn, **kw)
+
+ def visit_match_op_binary(self, binary, operator, **kw):
+ return "CONTAINS (%s, %s)" % (self.process(binary.left),
+ self.process(binary.right))
+
+ def visit_true(self, expr, **kw):
+ return '1'
+
+ def visit_false(self, expr, **kw):
+ return '0'
+
+ def get_cte_preamble(self, recursive):
+ return "WITH"
+
+ def get_select_hint_text(self, byfroms):
+ return " ".join(
+ "/*+ %s */" % text for table, text in byfroms.items()
+ )
+
+ def function_argspec(self, fn, **kw):
+ if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS:
+ return compiler.SQLCompiler.function_argspec(self, fn, **kw)
+ else:
+ return ""
+
+ def default_from(self):
+ """Called when a ``SELECT`` statement has no froms,
+ and no ``FROM`` clause is to be appended.
+
+ The Oracle compiler tacks a "FROM DUAL" to the statement.
+ """
+
+ return " FROM DUAL"
+
+ def visit_join(self, join, **kwargs):
+ if self.dialect.use_ansi:
+ return compiler.SQLCompiler.visit_join(self, join, **kwargs)
+ else:
+ kwargs['asfrom'] = True
+ if isinstance(join.right, expression.FromGrouping):
+ right = join.right.element
+ else:
+ right = join.right
+ return self.process(join.left, **kwargs) + \
+ ", " + self.process(right, **kwargs)
+
+ def _get_nonansi_join_whereclause(self, froms):
+ clauses = []
+
+ def visit_join(join):
+ if join.isouter:
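+                # Oracle non-ANSI outer join: columns of the
+                # outer-joined (right) table get the (+) marker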
+ def visit_binary(binary):
+ if binary.operator == sql_operators.eq:
+ if join.right.is_derived_from(binary.left.table):
+ binary.left = _OuterJoinColumn(binary.left)
+ elif join.right.is_derived_from(binary.right.table):
+ binary.right = _OuterJoinColumn(binary.right)
+ clauses.append(visitors.cloned_traverse(
+ join.onclause, {}, {'binary': visit_binary}))
+ else:
+ clauses.append(join.onclause)
+
+ for j in join.left, join.right:
+ if isinstance(j, expression.Join):
+ visit_join(j)
+ elif isinstance(j, expression.FromGrouping):
+ visit_join(j.element)
+
+ for f in froms:
+ if isinstance(f, expression.Join):
+ visit_join(f)
+
+ if not clauses:
+ return None
+ else:
+ return sql.and_(*clauses)
+
+ def visit_outer_join_column(self, vc, **kw):
+ return self.process(vc.column, **kw) + "(+)"
+
+ def visit_sequence(self, seq):
+ return (self.dialect.identifier_preparer.format_sequence(seq) +
+ ".nextval")
+
+ def get_render_as_alias_suffix(self, alias_name_text):
+ """Oracle doesn't like ``FROM table AS alias``"""
+
+ return " " + alias_name_text
+
+ def returning_clause(self, stmt, returning_cols):
+ columns = []
+ binds = []
+ for i, column in enumerate(
+ expression._select_iterables(returning_cols)):
+ if column.type._has_column_expression:
+ col_expr = column.type.column_expression(column)
+ else:
+ col_expr = column
+ outparam = sql.outparam("ret_%d" % i, type_=column.type)
+ self.binds[outparam.key] = outparam
+ binds.append(
+ self.bindparam_string(self._truncate_bindparam(outparam)))
+ columns.append(
+ self.process(col_expr, within_columns_clause=False))
+
+ self._add_to_result_map(
+ outparam.key, outparam.key,
+ (column, getattr(column, 'name', None),
+ getattr(column, 'key', None)),
+ column.type
+ )
+
+ return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
+
+ def _TODO_visit_compound_select(self, select):
+ """Need to determine how to get ``LIMIT``/``OFFSET`` into a
+ ``UNION`` for Oracle.
+ """
+ pass
+
+ def visit_select(self, select, **kwargs):
+        """Look for ``LIMIT`` and ``OFFSET`` in a select statement; if
+        present, wrap the statement in a subquery with a ``rownum``
+        criterion.
+ """
+
+ if not getattr(select, '_oracle_visit', None):
+ if not self.dialect.use_ansi:
+ froms = self._display_froms_for_select(
+ select, kwargs.get('asfrom', False))
+ whereclause = self._get_nonansi_join_whereclause(froms)
+ if whereclause is not None:
+ select = select.where(whereclause)
+ select._oracle_visit = True
+
+ limit_clause = select._limit_clause
+ offset_clause = select._offset_clause
+ if limit_clause is not None or offset_clause is not None:
+ # See http://www.oracle.com/technology/oramag/oracle/06-sep/\
+ # o56asktom.html
+ #
+ # Generalized form of an Oracle pagination query:
+ # select ... from (
+ # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from
+ # ( select distinct ... where ... order by ...
+ # ) where ROWNUM <= :limit+:offset
+ # ) where ora_rn > :offset
+ # Outer select and "ROWNUM as ora_rn" can be dropped if
+ # limit=0
+
+ kwargs['select_wraps_for'] = select
+ select = select._generate()
+ select._oracle_visit = True
+
+ # Wrap the middle select and add the hint
+ limitselect = sql.select([c for c in select.c])
+ if limit_clause is not None and \
+ self.dialect.optimize_limits and \
+ select._simple_int_limit:
+ limitselect = limitselect.prefix_with(
+ "/*+ FIRST_ROWS(%d) */" %
+ select._limit)
+
+ limitselect._oracle_visit = True
+ limitselect._is_wrapper = True
+
+ # add expressions to accommodate FOR UPDATE OF
+ for_update = select._for_update_arg
+ if for_update is not None and for_update.of:
+ for_update = for_update._clone()
+ for_update._copy_internals()
+
+ for elem in for_update.of:
+ select.append_column(elem)
+
+ adapter = sql_util.ClauseAdapter(select)
+ for_update.of = [
+ adapter.traverse(elem)
+ for elem in for_update.of]
+
+ # If needed, add the limiting clause
+ if limit_clause is not None:
+ if not self.dialect.use_binds_for_limits:
+ # use simple int limits, will raise an exception
+ # if the limit isn't specified this way
+ max_row = select._limit
+
+ if offset_clause is not None:
+ max_row += select._offset
+ max_row = sql.literal_column("%d" % max_row)
+ else:
+ max_row = limit_clause
+ if offset_clause is not None:
+ max_row = max_row + offset_clause
+ limitselect.append_whereclause(
+ sql.literal_column("ROWNUM") <= max_row)
+
+ # If needed, add the ora_rn, and wrap again with offset.
+ if offset_clause is None:
+ limitselect._for_update_arg = for_update
+ select = limitselect
+ else:
+ limitselect = limitselect.column(
+ sql.literal_column("ROWNUM").label("ora_rn"))
+ limitselect._oracle_visit = True
+ limitselect._is_wrapper = True
+
+ offsetselect = sql.select(
+ [c for c in limitselect.c if c.key != 'ora_rn'])
+ offsetselect._oracle_visit = True
+ offsetselect._is_wrapper = True
+
+ if for_update is not None and for_update.of:
+ for elem in for_update.of:
+ if limitselect.corresponding_column(elem) is None:
+ limitselect.append_column(elem)
+
+ if not self.dialect.use_binds_for_limits:
+ offset_clause = sql.literal_column(
+ "%d" % select._offset)
+ offsetselect.append_whereclause(
+ sql.literal_column("ora_rn") > offset_clause)
+
+ offsetselect._for_update_arg = for_update
+ select = offsetselect
+
+ return compiler.SQLCompiler.visit_select(self, select, **kwargs)
+
+ def limit_clause(self, select, **kw):
+ return ""
+
+ def for_update_clause(self, select, **kw):
+ if self.is_subquery():
+ return ""
+
+ tmp = ' FOR UPDATE'
+
+ if select._for_update_arg.of:
+ tmp += ' OF ' + ', '.join(
+ self.process(elem, **kw) for elem in
+ select._for_update_arg.of
+ )
+
+ if select._for_update_arg.nowait:
+ tmp += " NOWAIT"
+ if select._for_update_arg.skip_locked:
+ tmp += " SKIP LOCKED"
+
+ return tmp
+
+
+class OracleDDLCompiler(compiler.DDLCompiler):
+
+ def define_constraint_cascades(self, constraint):
+ text = ""
+ if constraint.ondelete is not None:
+ text += " ON DELETE %s" % constraint.ondelete
+
+ # oracle has no ON UPDATE CASCADE -
+ # its only available via triggers
+ # http://asktom.oracle.com/tkyte/update_cascade/index.html
+ if constraint.onupdate is not None:
+ util.warn(
+ "Oracle does not contain native UPDATE CASCADE "
+ "functionality - onupdates will not be rendered for foreign "
+ "keys. Consider using deferrable=True, initially='deferred' "
+ "or triggers.")
+
+ return text
+
+ def visit_create_index(self, create):
+ index = create.element
+ self._verify_index_table(index)
+ preparer = self.preparer
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+ if index.dialect_options['oracle']['bitmap']:
+ text += "BITMAP "
+ text += "INDEX %s ON %s (%s)" % (
+ self._prepared_index_name(index, include_schema=True),
+ preparer.format_table(index.table, use_schema=True),
+ ', '.join(
+ self.sql_compiler.process(
+ expr,
+ include_table=False, literal_binds=True)
+ for expr in index.expressions)
+ )
+ if index.dialect_options['oracle']['compress'] is not False:
+ if index.dialect_options['oracle']['compress'] is True:
+ text += " COMPRESS"
+ else:
+ text += " COMPRESS %d" % (
+ index.dialect_options['oracle']['compress']
+ )
+ return text
+
+ def post_create_table(self, table):
+ table_opts = []
+ opts = table.dialect_options['oracle']
+
+ if opts['on_commit']:
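+            # normalize e.g. 'preserve_rows' into 'PRESERVE ROWS'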
+ on_commit_options = opts['on_commit'].replace("_", " ").upper()
+ table_opts.append('\n ON COMMIT %s' % on_commit_options)
+
+ if opts['compress']:
+ if opts['compress'] is True:
+ table_opts.append("\n COMPRESS")
+ else:
+ table_opts.append("\n COMPRESS FOR %s" % (
+ opts['compress']
+ ))
+
+ return ''.join(table_opts)
+
+
+class OracleIdentifierPreparer(compiler.IdentifierPreparer):
+
+ reserved_words = set([x.lower() for x in RESERVED_WORDS])
+ illegal_initial_characters = set(
+ (str(dig) for dig in range(0, 10))).union(["_", "$"])
+
+ def _bindparam_requires_quotes(self, value):
+ """Return True if the given identifier requires quoting."""
+ lc_value = value.lower()
+ return (lc_value in self.reserved_words
+ or value[0] in self.illegal_initial_characters
+ or not self.legal_characters.match(util.text_type(value))
+ )
+
+ def format_savepoint(self, savepoint):
+ name = savepoint.ident.lstrip('_')
+ return super(
+ OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
+
+
+class OracleExecutionContext(default.DefaultExecutionContext):
+ def fire_sequence(self, seq, type_):
+ return self._execute_scalar(
+ "SELECT " +
+ self.dialect.identifier_preparer.format_sequence(seq) +
+ ".nextval FROM DUAL", type_)
+
+
+class OracleDialect(default.DefaultDialect):
+ name = 'oracle'
+ supports_alter = True
+ supports_unicode_statements = False
+ supports_unicode_binds = False
+ max_identifier_length = 30
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = False
+
+ supports_simple_order_by_label = False
+
+ supports_sequences = True
+ sequences_optional = False
+ postfetch_lastrowid = False
+
+ default_paramstyle = 'named'
+ colspecs = colspecs
+ ischema_names = ischema_names
+ requires_name_normalize = True
+
+ supports_default_values = False
+ supports_empty_insert = False
+
+ statement_compiler = OracleCompiler
+ ddl_compiler = OracleDDLCompiler
+ type_compiler = OracleTypeCompiler
+ preparer = OracleIdentifierPreparer
+ execution_ctx_cls = OracleExecutionContext
+
+ reflection_options = ('oracle_resolve_synonyms', )
+
+ construct_arguments = [
+ (sa_schema.Table, {
+ "resolve_synonyms": False,
+ "on_commit": None,
+ "compress": False
+ }),
+ (sa_schema.Index, {
+ "bitmap": False,
+ "compress": False
+ })
+ ]
+
+ def __init__(self,
+ use_ansi=True,
+ optimize_limits=False,
+ use_binds_for_limits=True,
+ exclude_tablespaces=('SYSTEM', 'SYSAUX', ),
+ **kwargs):
+ default.DefaultDialect.__init__(self, **kwargs)
+ self.use_ansi = use_ansi
+ self.optimize_limits = optimize_limits
+ self.use_binds_for_limits = use_binds_for_limits
+ self.exclude_tablespaces = exclude_tablespaces
+
+ def initialize(self, connection):
+ super(OracleDialect, self).initialize(connection)
+ self.implicit_returning = self.__dict__.get(
+ 'implicit_returning',
+ self.server_version_info > (10, )
+ )
+
+ if self._is_oracle_8:
+ self.colspecs = self.colspecs.copy()
+ self.colspecs.pop(sqltypes.Interval)
+ self.use_ansi = False
+
+ @property
+ def _is_oracle_8(self):
+ return self.server_version_info and \
+ self.server_version_info < (9, )
+
+ @property
+ def _supports_table_compression(self):
+ return self.server_version_info and \
+ self.server_version_info >= (10, 1, )
+
+ @property
+ def _supports_table_compress_for(self):
+ return self.server_version_info and \
+ self.server_version_info >= (11, )
+
+ @property
+ def _supports_char_length(self):
+ return not self._is_oracle_8
+
+ @property
+ def _supports_nchar(self):
+ return not self._is_oracle_8
+
+ def do_release_savepoint(self, connection, name):
+ # Oracle does not support RELEASE SAVEPOINT
+ pass
+
+ def has_table(self, connection, table_name, schema=None):
+ if not schema:
+ schema = self.default_schema_name
+ cursor = connection.execute(
+ sql.text("SELECT table_name FROM all_tables "
+ "WHERE table_name = :name AND owner = :schema_name"),
+ name=self.denormalize_name(table_name),
+ schema_name=self.denormalize_name(schema))
+ return cursor.first() is not None
+
+ def has_sequence(self, connection, sequence_name, schema=None):
+ if not schema:
+ schema = self.default_schema_name
+ cursor = connection.execute(
+ sql.text("SELECT sequence_name FROM all_sequences "
+ "WHERE sequence_name = :name AND "
+ "sequence_owner = :schema_name"),
+ name=self.denormalize_name(sequence_name),
+ schema_name=self.denormalize_name(schema))
+ return cursor.first() is not None
+
+ def normalize_name(self, name):
+ if name is None:
+ return None
+ if util.py2k:
+ if isinstance(name, str):
+ name = name.decode(self.encoding)
+ if name.upper() == name and not \
+ self.identifier_preparer._requires_quotes(name.lower()):
+ return name.lower()
+ elif name.lower() == name:
+ return quoted_name(name, quote=True)
+ else:
+ return name
+
+ def denormalize_name(self, name):
+ if name is None:
+ return None
+ elif name.lower() == name and not \
+ self.identifier_preparer._requires_quotes(name.lower()):
+ name = name.upper()
+ if util.py2k:
+ if not self.supports_unicode_binds:
+ name = name.encode(self.encoding)
+ else:
+ name = unicode(name)
+ return name
+
+ def _get_default_schema_name(self, connection):
+ return self.normalize_name(
+ connection.execute('SELECT USER FROM DUAL').scalar())
+
+ def _resolve_synonym(self, connection, desired_owner=None,
+ desired_synonym=None, desired_table=None):
+ """search for a local synonym matching the given desired owner/name.
+
+ if desired_owner is None, attempts to locate a distinct owner.
+
+ returns the actual name, owner, dblink name, and synonym name if
+ found.
+ """
+
+ q = "SELECT owner, table_owner, table_name, db_link, "\
+ "synonym_name FROM all_synonyms WHERE "
+ clauses = []
+ params = {}
+ if desired_synonym:
+ clauses.append("synonym_name = :synonym_name")
+ params['synonym_name'] = desired_synonym
+ if desired_owner:
+ clauses.append("owner = :desired_owner")
+ params['desired_owner'] = desired_owner
+ if desired_table:
+ clauses.append("table_name = :tname")
+ params['tname'] = desired_table
+
+ q += " AND ".join(clauses)
+
+ result = connection.execute(sql.text(q), **params)
+ if desired_owner:
+ row = result.first()
+ if row:
+ return (row['table_name'], row['table_owner'],
+ row['db_link'], row['synonym_name'])
+ else:
+ return None, None, None, None
+ else:
+ rows = result.fetchall()
+ if len(rows) > 1:
+ raise AssertionError(
+ "There are multiple tables visible to the schema, you "
+ "must specify owner")
+ elif len(rows) == 1:
+ row = rows[0]
+ return (row['table_name'], row['table_owner'],
+ row['db_link'], row['synonym_name'])
+ else:
+ return None, None, None, None
+
+ @reflection.cache
+ def _prepare_reflection_args(self, connection, table_name, schema=None,
+ resolve_synonyms=False, dblink='', **kw):
+
+ if resolve_synonyms:
+ actual_name, owner, dblink, synonym = self._resolve_synonym(
+ connection,
+ desired_owner=self.denormalize_name(schema),
+ desired_synonym=self.denormalize_name(table_name)
+ )
+ else:
+ actual_name, owner, dblink, synonym = None, None, None, None
+ if not actual_name:
+ actual_name = self.denormalize_name(table_name)
+
+ if dblink:
+ # using user_db_links here since all_db_links appears
+ # to have more restricted permissions.
+ # http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
+ # will need to hear from more users if we are doing
+ # the right thing here. See [ticket:2619]
+ owner = connection.scalar(
+ sql.text("SELECT username FROM user_db_links "
+ "WHERE db_link=:link"), link=dblink)
+ dblink = "@" + dblink
+ elif not owner:
+ owner = self.denormalize_name(schema or self.default_schema_name)
+
+ return (actual_name, owner, dblink or '', synonym)
+
+ @reflection.cache
+ def get_schema_names(self, connection, **kw):
+ s = "SELECT username FROM all_users ORDER BY username"
+        cursor = connection.execute(s)
+ return [self.normalize_name(row[0]) for row in cursor]
+
+ @reflection.cache
+ def get_table_names(self, connection, schema=None, **kw):
+ schema = self.denormalize_name(schema or self.default_schema_name)
+
+ # note that table_names() isn't loading DBLINKed or synonym'ed tables
+ if schema is None:
+ schema = self.default_schema_name
+
+ sql_str = "SELECT table_name FROM all_tables WHERE "
+ if self.exclude_tablespaces:
+ sql_str += (
+ "nvl(tablespace_name, 'no tablespace') "
+ "NOT IN (%s) AND " % (
+ ', '.join(["'%s'" % ts for ts in self.exclude_tablespaces])
+ )
+ )
+ sql_str += (
+ "OWNER = :owner "
+ "AND IOT_NAME IS NULL "
+ "AND DURATION IS NULL")
+
+ cursor = connection.execute(sql.text(sql_str), owner=schema)
+ return [self.normalize_name(row[0]) for row in cursor]
+
+ @reflection.cache
+ def get_temp_table_names(self, connection, **kw):
+ schema = self.denormalize_name(self.default_schema_name)
+
+ sql_str = "SELECT table_name FROM all_tables WHERE "
+ if self.exclude_tablespaces:
+ sql_str += (
+ "nvl(tablespace_name, 'no tablespace') "
+ "NOT IN (%s) AND " % (
+ ', '.join(["'%s'" % ts for ts in self.exclude_tablespaces])
+ )
+ )
+ sql_str += (
+ "OWNER = :owner "
+ "AND IOT_NAME IS NULL "
+ "AND DURATION IS NOT NULL")
+
+ cursor = connection.execute(sql.text(sql_str), owner=schema)
+ return [self.normalize_name(row[0]) for row in cursor]
+
+ @reflection.cache
+ def get_view_names(self, connection, schema=None, **kw):
+ schema = self.denormalize_name(schema or self.default_schema_name)
+ s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
+ cursor = connection.execute(s, owner=self.denormalize_name(schema))
+ return [self.normalize_name(row[0]) for row in cursor]
+
+ @reflection.cache
+ def get_table_options(self, connection, table_name, schema=None, **kw):
+ options = {}
+
+ resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
+ dblink = kw.get('dblink', '')
+ info_cache = kw.get('info_cache')
+
+ (table_name, schema, dblink, synonym) = \
+ self._prepare_reflection_args(connection, table_name, schema,
+ resolve_synonyms, dblink,
+ info_cache=info_cache)
+
+ params = {"table_name": table_name}
+
+ columns = ["table_name"]
+ if self._supports_table_compression:
+ columns.append("compression")
+ if self._supports_table_compress_for:
+ columns.append("compress_for")
+
+ text = "SELECT %(columns)s "\
+ "FROM ALL_TABLES%(dblink)s "\
+ "WHERE table_name = :table_name"
+
+ if schema is not None:
+ params['owner'] = schema
+ text += " AND owner = :owner "
+ text = text % {'dblink': dblink, 'columns': ", ".join(columns)}
+
+ result = connection.execute(sql.text(text), **params)
+
+ enabled = dict(DISABLED=False, ENABLED=True)
+
+ row = result.first()
+ if row:
+ if "compression" in row and enabled.get(row.compression, False):
+ if "compress_for" in row:
+ options['oracle_compress'] = row.compress_for
+ else:
+ options['oracle_compress'] = True
+
+ return options
+
+ @reflection.cache
+ def get_columns(self, connection, table_name, schema=None, **kw):
+ """
+
+ kw arguments can be:
+
+ oracle_resolve_synonyms
+
+ dblink
+
+ """
+
+ resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
+ dblink = kw.get('dblink', '')
+ info_cache = kw.get('info_cache')
+
+ (table_name, schema, dblink, synonym) = \
+ self._prepare_reflection_args(connection, table_name, schema,
+ resolve_synonyms, dblink,
+ info_cache=info_cache)
+ columns = []
+ if self._supports_char_length:
+ char_length_col = 'char_length'
+ else:
+ char_length_col = 'data_length'
+
+ params = {"table_name": table_name}
+ text = "SELECT column_name, data_type, %(char_length_col)s, "\
+ "data_precision, data_scale, "\
+ "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\
+ "WHERE table_name = :table_name"
+ if schema is not None:
+ params['owner'] = schema
+ text += " AND owner = :owner "
+ text += " ORDER BY column_id"
+ text = text % {'dblink': dblink, 'char_length_col': char_length_col}
+
+ c = connection.execute(sql.text(text), **params)
+
+ for row in c:
+            (colname, orig_colname, coltype, length, precision, scale,
+             nullable, default) = \
+                (self.normalize_name(row[0]), row[0], row[1], row[2],
+                 row[3], row[4], row[5] == 'Y', row[6])
+
+ if coltype == 'NUMBER':
+ coltype = NUMBER(precision, scale)
+ elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
+ coltype = self.ischema_names.get(coltype)(length)
+ elif 'WITH TIME ZONE' in coltype:
+ coltype = TIMESTAMP(timezone=True)
+ else:
+ coltype = re.sub(r'\(\d+\)', '', coltype)
+ try:
+ coltype = self.ischema_names[coltype]
+ except KeyError:
+ util.warn("Did not recognize type '%s' of column '%s'" %
+ (coltype, colname))
+ coltype = sqltypes.NULLTYPE
+
+ cdict = {
+ 'name': colname,
+ 'type': coltype,
+ 'nullable': nullable,
+ 'default': default,
+ 'autoincrement': 'auto',
+ }
+ if orig_colname.lower() == orig_colname:
+ cdict['quote'] = True
+
+ columns.append(cdict)
+ return columns
+
+ @reflection.cache
+ def get_indexes(self, connection, table_name, schema=None,
+ resolve_synonyms=False, dblink='', **kw):
+
+ info_cache = kw.get('info_cache')
+ (table_name, schema, dblink, synonym) = \
+ self._prepare_reflection_args(connection, table_name, schema,
+ resolve_synonyms, dblink,
+ info_cache=info_cache)
+ indexes = []
+
+ params = {'table_name': table_name}
+ text = \
+ "SELECT a.index_name, a.column_name, "\
+ "\nb.index_type, b.uniqueness, b.compression, b.prefix_length "\
+ "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\
+ "\nALL_INDEXES%(dblink)s b "\
+ "\nWHERE "\
+ "\na.index_name = b.index_name "\
+ "\nAND a.table_owner = b.table_owner "\
+ "\nAND a.table_name = b.table_name "\
+ "\nAND a.table_name = :table_name "
+
+ if schema is not None:
+ params['schema'] = schema
+ text += "AND a.table_owner = :schema "
+
+ text += "ORDER BY a.index_name, a.column_position"
+
+ text = text % {'dblink': dblink}
+
+ q = sql.text(text)
+ rp = connection.execute(q, **params)
+ indexes = []
+ last_index_name = None
+ pk_constraint = self.get_pk_constraint(
+ connection, table_name, schema, resolve_synonyms=resolve_synonyms,
+ dblink=dblink, info_cache=kw.get('info_cache'))
+ pkeys = pk_constraint['constrained_columns']
+ uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
+ enabled = dict(DISABLED=False, ENABLED=True)
+
+ oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)
+
+ def upper_name_set(names):
+ return set([i.upper() for i in names])
+
+ pk_names = upper_name_set(pkeys)
+
+ def remove_if_primary_key(index):
+ # don't include the primary key index
+ if index is not None and \
+ upper_name_set(index['column_names']) == pk_names:
+ indexes.pop()
+
+ index = None
+ for rset in rp:
+ if rset.index_name != last_index_name:
+ remove_if_primary_key(index)
+ index = dict(name=self.normalize_name(rset.index_name),
+ column_names=[], dialect_options={})
+ indexes.append(index)
+ index['unique'] = uniqueness.get(rset.uniqueness, False)
+
+ if rset.index_type in ('BITMAP', 'FUNCTION-BASED BITMAP'):
+ index['dialect_options']['oracle_bitmap'] = True
+ if enabled.get(rset.compression, False):
+ index['dialect_options']['oracle_compress'] = rset.prefix_length
+
+ # filter out Oracle SYS_NC names. could also do an outer join
+ # to the all_tab_columns table and check for real col names there.
+ if not oracle_sys_col.match(rset.column_name):
+ index['column_names'].append(
+ self.normalize_name(rset.column_name))
+ last_index_name = rset.index_name
+ remove_if_primary_key(index)
+ return indexes
+
+ @reflection.cache
+ def _get_constraint_data(self, connection, table_name, schema=None,
+ dblink='', **kw):
+
+ params = {'table_name': table_name}
+
+ text = \
+ "SELECT"\
+ "\nac.constraint_name,"\
+ "\nac.constraint_type,"\
+ "\nloc.column_name AS local_column,"\
+ "\nrem.table_name AS remote_table,"\
+ "\nrem.column_name AS remote_column,"\
+ "\nrem.owner AS remote_owner,"\
+ "\nloc.position as loc_pos,"\
+ "\nrem.position as rem_pos"\
+ "\nFROM all_constraints%(dblink)s ac,"\
+ "\nall_cons_columns%(dblink)s loc,"\
+ "\nall_cons_columns%(dblink)s rem"\
+ "\nWHERE ac.table_name = :table_name"\
+ "\nAND ac.constraint_type IN ('R','P')"
+
+ if schema is not None:
+ params['owner'] = schema
+ text += "\nAND ac.owner = :owner"
+
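+        # remote (referenced) columns are joined with Oracle's legacy (+)
+        # outer join operator so that 'P' (primary key) rows, which have
+        # no remote side, are still returned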
+ text += \
+ "\nAND ac.owner = loc.owner"\
+ "\nAND ac.constraint_name = loc.constraint_name"\
+ "\nAND ac.r_owner = rem.owner(+)"\
+ "\nAND ac.r_constraint_name = rem.constraint_name(+)"\
+ "\nAND (rem.position IS NULL or loc.position=rem.position)"\
+ "\nORDER BY ac.constraint_name, loc.position"
+
+ text = text % {'dblink': dblink}
+ rp = connection.execute(sql.text(text), **params)
+ constraint_data = rp.fetchall()
+ return constraint_data
+
+ @reflection.cache
+ def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+ resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
+ dblink = kw.get('dblink', '')
+ info_cache = kw.get('info_cache')
+
+ (table_name, schema, dblink, synonym) = \
+ self._prepare_reflection_args(connection, table_name, schema,
+ resolve_synonyms, dblink,
+ info_cache=info_cache)
+ pkeys = []
+ constraint_name = None
+ constraint_data = self._get_constraint_data(
+ connection, table_name, schema, dblink,
+ info_cache=kw.get('info_cache'))
+
+ for row in constraint_data:
+            (cons_name, cons_type, local_column,
+             remote_table, remote_column, remote_owner) = \
+                row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
+ if cons_type == 'P':
+ if constraint_name is None:
+ constraint_name = self.normalize_name(cons_name)
+ pkeys.append(local_column)
+ return {'constrained_columns': pkeys, 'name': constraint_name}
+
+ @reflection.cache
+ def get_foreign_keys(self, connection, table_name, schema=None, **kw):
+ """
+
+ kw arguments can be:
+
+ oracle_resolve_synonyms
+
+ dblink
+
+ """
+
+ requested_schema = schema # to check later on
+ resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
+ dblink = kw.get('dblink', '')
+ info_cache = kw.get('info_cache')
+
+ (table_name, schema, dblink, synonym) = \
+ self._prepare_reflection_args(connection, table_name, schema,
+ resolve_synonyms, dblink,
+ info_cache=info_cache)
+
+ constraint_data = self._get_constraint_data(
+ connection, table_name, schema, dblink,
+ info_cache=kw.get('info_cache'))
+
+ def fkey_rec():
+ return {
+ 'name': None,
+ 'constrained_columns': [],
+ 'referred_schema': None,
+ 'referred_table': None,
+ 'referred_columns': []
+ }
+
+ fkeys = util.defaultdict(fkey_rec)
+
+ for row in constraint_data:
+            (cons_name, cons_type, local_column,
+             remote_table, remote_column, remote_owner) = \
+                row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
+
+ if cons_type == 'R':
+ if remote_table is None:
+ # ticket 363
+ util.warn(
+ ("Got 'None' querying 'table_name' from "
+ "all_cons_columns%(dblink)s - does the user have "
+ "proper rights to the table?") % {'dblink': dblink})
+ continue
+
+ rec = fkeys[cons_name]
+ rec['name'] = cons_name
+ local_cols, remote_cols = rec[
+ 'constrained_columns'], rec['referred_columns']
+
+ if not rec['referred_table']:
+ if resolve_synonyms:
+ ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
+ self._resolve_synonym(
+ connection,
+ desired_owner=self.denormalize_name(
+ remote_owner),
+ desired_table=self.denormalize_name(
+ remote_table)
+ )
+ if ref_synonym:
+ remote_table = self.normalize_name(ref_synonym)
+ remote_owner = self.normalize_name(
+ ref_remote_owner)
+
+ rec['referred_table'] = remote_table
+
+ if requested_schema is not None or \
+ self.denormalize_name(remote_owner) != schema:
+ rec['referred_schema'] = remote_owner
+
+ local_cols.append(local_column)
+ remote_cols.append(remote_column)
+
+ return list(fkeys.values())
+
+ @reflection.cache
+ def get_view_definition(self, connection, view_name, schema=None,
+ resolve_synonyms=False, dblink='', **kw):
+ info_cache = kw.get('info_cache')
+ (view_name, schema, dblink, synonym) = \
+ self._prepare_reflection_args(connection, view_name, schema,
+ resolve_synonyms, dblink,
+ info_cache=info_cache)
+
+ params = {'view_name': view_name}
+ text = "SELECT text FROM all_views WHERE view_name=:view_name"
+
+ if schema is not None:
+ text += " AND owner = :schema"
+ params['schema'] = schema
+
+ rp = connection.execute(sql.text(text), **params).scalar()
+ if rp:
+ if util.py2k:
+ rp = rp.decode(self.encoding)
+ return rp
+ else:
+ return None
+
+
+class _OuterJoinColumn(sql.ClauseElement):
+ __visit_name__ = 'outer_join_column'
+
+ def __init__(self, column):
+ self.column = column
diff --git a/app/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/app/lib/sqlalchemy/dialects/oracle/cx_oracle.py
new file mode 100644
index 0000000..f85324f
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -0,0 +1,1020 @@
+# oracle/cx_oracle.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+.. dialect:: oracle+cx_oracle
+ :name: cx-Oracle
+ :dbapi: cx_oracle
+ :connectstring: oracle+cx_oracle://user:pass@host:port/dbname\
+[?key=value&key=value...]
+ :url: http://cx-oracle.sourceforge.net/
+
+Additional Connect Arguments
+----------------------------
+
+When connecting with ``dbname`` present, the host, port, and dbname tokens are
+converted to a TNS name using
+the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken
+directly as a TNS name.
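+
+For example, both URL forms below are accepted; ``ORCL`` and ``mytns`` are
+placeholder names used for illustration::
+
+    # host, port and dbname are combined via cx_Oracle.makedsn()
+    engine = create_engine("oracle+cx_oracle://scott:tiger@host:1521/ORCL")
+
+    # no dbname portion: "mytns" is taken directly as a TNS name
+    engine = create_engine("oracle+cx_oracle://scott:tiger@mytns")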
+
+Additional arguments which may be specified either as query string arguments
+on the URL, or as keyword arguments to :func:`.create_engine()`, are listed
+below; a combined example follows the list:
+
+* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``.
+
+* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
+ to 50. This setting is significant with cx_Oracle as the contents of LOB
+ objects are only readable within a "live" row (e.g. within a batch of
+ 50 rows).
+
+* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
+
+* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
+ all bind parameters. This is required for LOB datatypes but can be
+ disabled to reduce overhead. Defaults to ``True``. Specific types
+ can be excluded from this process using the ``exclude_setinputsizes``
+ parameter.
+
+* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
+
+* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
+
+* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
+ be excluded from the "auto setinputsizes" feature. The type names here
+ must match DBAPI types that are found in the "cx_Oracle" module namespace,
+ such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to
+ ``(STRING, UNICODE)``.
+
+ .. versionadded:: 0.8 specific DBAPI types can be excluded from the
+ auto_setinputsizes feature via the exclude_setinputsizes attribute.
+
+* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or
+ alternatively an integer value. This value is only available as a URL query
+ string argument.
+
+* ``threaded`` - enable multithreaded access to cx_oracle connections.
+ Defaults to ``True``. Note that this is the opposite default of the
+ cx_Oracle DBAPI itself.
+
+* ``service_name`` - An option to use a connection string (DSN) with
+  ``SERVICE_NAME`` instead of ``SID``. It can't be passed when the
+  ``database`` portion of the URL is also present.
+ E.g. ``oracle+cx_oracle://scott:tiger@host:1521/?service_name=hr``
+ is a valid url. This value is only available as a URL query string argument.
+
+ .. versionadded:: 1.0.0
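+
+A hedged sketch combining the two styles; the values shown are
+illustrative only::
+
+    # as keyword arguments to create_engine()
+    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn",
+                           arraysize=100, threaded=False)
+
+    # as URL query string arguments; "mode" is only available in this form
+    engine = create_engine(
+        "oracle+cx_oracle://scott:tiger@host:1521/dbname"
+        "?threaded=false&mode=SYSDBA")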
+
+.. _cx_oracle_unicode:
+
+Unicode
+-------
+
+The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the
+ability to return string results as Python unicode objects natively.
+
+When used in Python 3, cx_Oracle returns all strings as Python unicode objects
+(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
+unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
+column values that are of type ``VARCHAR`` or other non-unicode string types,
+it will return values as Python strings (e.g. bytestrings).
+
+The cx_Oracle SQLAlchemy dialect presents two different options for the use
+case of returning ``VARCHAR`` column values as Python unicode objects under
+Python 2:
+
+* the cx_Oracle DBAPI has the ability to coerce all string results to Python
+ unicode objects unconditionally using output type handlers. This has
+ the advantage that the unicode conversion is global to all statements
+ at the cx_Oracle driver level, meaning it works with raw textual SQL
+ statements that have no typing information associated. However, this system
+  has been observed to incur significant performance overhead, not only
+  because it takes effect for all string values unconditionally, but also
+  because cx_Oracle under Python 2 seems to use a pure-Python function call
+  to do the decode operation, which under cPython can be orders of
+  magnitude slower than doing it using C functions alone.
+
+* SQLAlchemy has unicode-decoding services built in, and when using
+ SQLAlchemy's C extensions, these functions do not use any Python function
+ calls and are very fast. The disadvantage to this approach is that the
+ unicode conversion only takes effect for statements where the
+ :class:`.Unicode` type or :class:`.String` type with
+ ``convert_unicode=True`` is explicitly associated with the result column.
+ This is the case for any ORM or Core query or SQL expression as well as for
+ a :func:`.text` construct that specifies output column types, so in the vast
+ majority of cases this is not an issue. However, when sending a completely
+ raw string to :meth:`.Connection.execute`, this typing information isn't
+ present, unless the string is handled within a :func:`.text` construct that
+ adds typing information.
+
+As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's
+typing system. This keeps cx_Oracle's expensive Python 2 approach
+disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy
+detects that cx_Oracle is returning unicode objects natively and cx_Oracle's
+system is used.
+
+To re-enable cx_Oracle's output type handler under Python 2, the
+``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
+:func:`.create_engine`::
+
+ engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True)
+
+Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results
+as Python unicode under Python 2 without using cx_Oracle's native handlers,
+the :func:`.text` feature can be used::
+
+ from sqlalchemy import text, Unicode
+ result = conn.execute(
+ text("select username from user").columns(username=Unicode))
+
+.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used
+ for unicode results of non-unicode datatypes in Python 2, after they were
+ identified as a major performance bottleneck. SQLAlchemy's own unicode
+ facilities are used instead.
+
+.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
+ cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.
+
+.. _cx_oracle_returning:
+
+RETURNING Support
+-----------------
+
+The cx_oracle DBAPI supports a limited subset of Oracle's already limited
+RETURNING support. Typically, results can only be guaranteed for at most one
+column being returned; this is the typical case when SQLAlchemy uses RETURNING
+to get just the value of a primary-key-associated sequence value.
+Additional column expressions will cause problems in a non-deterministic way,
+due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is
+required for more complex RETURNING scenarios.
+
+For this reason, stability may be enhanced by disabling RETURNING support
+completely; SQLAlchemy otherwise will use RETURNING to fetch newly
+sequence-generated primary keys. As illustrated in :ref:`oracle_returning`::
+
+ engine = create_engine("oracle://scott:tiger@dsn",
+ implicit_returning=False)
+
+.. seealso::
+
+ http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693
+ - OCI documentation for RETURNING
+
+ http://sourceforge.net/mailarchive/message.php?msg_id=31338136
+ - cx_oracle developer commentary
+
+.. _cx_oracle_lob:
+
+LOB Objects
+-----------
+
+cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy
+converts these to strings so that the interface of the Binary type is
+consistent with that of other backends, and so that the linkage to a live
+cursor is not needed in scenarios like result.fetchmany() and
+result.fetchall(). This means that by default, LOB objects are fully fetched
+unconditionally by SQLAlchemy, and the linkage to a live cursor is broken.
+
+To disable this processing, pass ``auto_convert_lobs=False`` to
+:func:`.create_engine()`.
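+
+A minimal sketch of the disabled mode; ``lob_table`` is a hypothetical table
+with a ``LargeBinary`` column named ``data``::
+
+    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn",
+                           auto_convert_lobs=False)
+
+    with engine.connect() as conn:
+        row = conn.execute(lob_table.select()).first()
+        # the raw cx_Oracle.LOB is returned; it must be read while the
+        # originating cursor is still live
+        data = row['data'].read()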
+
+Two Phase Transaction Support
+-----------------------------
+
+Two Phase transactions are implemented using XA transactions, and are known
+to work in a rudimentary fashion with recent versions of cx_Oracle
+as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
+considered to be robust and should still be regarded as experimental.
+
+In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
+two phase which prevents
+a particular DBAPI connection from being consistently usable in both
+prepared transactions as well as traditional DBAPI usage patterns; therefore
+once a particular connection is used via :meth:`.Connection.begin_prepared`,
+all subsequent usages of the underlying DBAPI connection must be within
+the context of prepared transactions.
+
+The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
+connections. Therefore, due to the above glitch, a DBAPI connection that has
+been used in a two-phase operation, and is then returned to the pool, will
+not be usable in a non-two-phase context. To avoid this situation,
+the application can make one of several choices:
+
+* Disable connection pooling using :class:`.NullPool`
+
+* Ensure that the particular :class:`.Engine` in use is only used
+ for two-phase operations. A :class:`.Engine` bound to an ORM
+ :class:`.Session` which includes ``twophase=True`` will consistently
+ use the two-phase transaction style.
+
+* For ad-hoc two-phase operations without disabling pooling, the DBAPI
+  connection in use can be evicted from the connection pool using the
+  :meth:`.Connection.detach` method; a sketch follows the version note below.
+
+.. versionchanged:: 0.8.0b2,0.7.10
+ Support for cx_oracle prepared transactions has been implemented
+ and tested.
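+
+A hedged sketch of the ad-hoc eviction approach mentioned above::
+
+    conn = engine.connect()
+    conn.detach()  # evict from the pool; discarded when closed
+    trans = conn.begin_twophase()
+    # ... two-phase work ...
+    trans.prepare()
+    trans.commit()
+    conn.close()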
+
+.. _cx_oracle_numeric:
+
+Precision Numerics
+------------------
+
+The SQLAlchemy dialect goes through a lot of steps to ensure
+that decimal numbers are sent and received with full accuracy.
+An "outputtypehandler" callable is associated with each
+cx_oracle connection object which detects numeric types and
+receives them as string values, instead of receiving a Python
+``float`` directly, which is then passed to the Python
+``Decimal`` constructor. The :class:`.Numeric` and
+:class:`.Float` types under the cx_oracle dialect are aware of
+this behavior, and will coerce the ``Decimal`` to ``float`` if
+the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
+optional on :class:`.Numeric`).
+
+Because the handler coerces to ``Decimal`` in all cases first,
+the feature can detract significantly from performance.
+If precision numerics aren't required, the decimal handling
+can be disabled by passing the flag ``coerce_to_decimal=False``
+to :func:`.create_engine`::
+
+ engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
+
+.. versionadded:: 0.7.6
+ Add the ``coerce_to_decimal`` flag.
+
+Another alternative for improving performance is to use the ``cdecimal``
+library; see :class:`.Numeric` for additional notes.
+
+The handler attempts to use the "precision" and "scale"
+attributes of the result set column to best determine if
+subsequent incoming values should be received as ``Decimal`` as
+opposed to int (in which case no processing is added). There are
+several scenarios where OCI_ does not provide unambiguous data
+as to the numeric type, including some situations where
+individual rows may return a combination of floating point and
+integer values. Certain values for "precision" and "scale" have
+been observed to determine this scenario. When it occurs, the
+outputtypehandler receives the value as a string and then passes it off
+to a processing function which detects, for each returned value, whether
+a decimal point is present, and if so converts it to ``Decimal``,
+otherwise to int. The intention is that simple int-based
+statements like "SELECT my_seq.nextval() FROM DUAL" continue to
+return ints and not ``Decimal`` objects, and that any kind of
+floating point value is received as a string so that there is no
+floating point loss of precision.
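+
+An illustrative sketch of that per-value decision; this mirrors the
+``_detect_decimal`` helper defined later in this module::
+
+    import decimal
+
+    def _detect_decimal(value):
+        if "." in value:
+            return decimal.Decimal(value)
+        else:
+            return int(value)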
+
+The "decimal point is present" logic itself is also sensitive to
+locale. Under OCI_, this is controlled by the NLS_LANG
+environment variable. Upon first connection, the dialect runs a
+test to determine the current "decimal" character, which can be
+a comma "," for European locales. From that point forward the
+outputtypehandler uses that character to represent a decimal
+point. Note that cx_oracle 5.0.3 or greater is required
+when dealing with numerics with locale settings that don't use
+a period "." as the decimal character.
+
+.. versionchanged:: 0.6.6
+ The outputtypehandler supports the case where the locale uses a
+ comma "," character to represent a decimal point.
+
+.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
+
+"""
+
+from __future__ import absolute_import
+
+from .base import OracleCompiler, OracleDialect, OracleExecutionContext
+from . import base as oracle
+from ...engine import result as _result
+from sqlalchemy import types as sqltypes, util, exc, processors
+import random
+import collections
+import decimal
+import re
+import time
+
+
+class _OracleNumeric(sqltypes.Numeric):
+ def bind_processor(self, dialect):
+ # cx_oracle accepts Decimal objects and floats
+ return None
+
+ def result_processor(self, dialect, coltype):
+ # we apply a cx_oracle type handler to all connections
+ # that converts floating point strings to Decimal().
+ # However, in some subquery situations, Oracle doesn't
+ # give us enough information to determine int or Decimal.
+ # It could even be int/Decimal differently on each row,
+ # regardless of the scale given for the originating type.
+ # So we still need an old school isinstance() handler
+ # here for decimals.
+
+ if dialect.supports_native_decimal:
+ if self.asdecimal:
+ fstring = "%%.%df" % self._effective_decimal_return_scale
+
+ def to_decimal(value):
+ if value is None:
+ return None
+ elif isinstance(value, decimal.Decimal):
+ return value
+ else:
+ return decimal.Decimal(fstring % value)
+
+ return to_decimal
+ else:
+ if self.precision is None and self.scale is None:
+ return processors.to_float
+ elif not getattr(self, '_is_oracle_number', False) \
+ and self.scale is not None:
+ return processors.to_float
+ else:
+ return None
+ else:
+ # cx_oracle 4 behavior, will assume
+ # floats
+ return super(_OracleNumeric, self).\
+ result_processor(dialect, coltype)
+
+
+class _OracleDate(sqltypes.Date):
+ def bind_processor(self, dialect):
+ return None
+
+ def result_processor(self, dialect, coltype):
+ def process(value):
+ if value is not None:
+ return value.date()
+ else:
+ return value
+ return process
+
+
+class _LOBMixin(object):
+ def result_processor(self, dialect, coltype):
+ if not dialect.auto_convert_lobs:
+ # return the cx_oracle.LOB directly.
+ return None
+
+ def process(value):
+ if value is not None:
+ return value.read()
+ else:
+ return value
+ return process
+
+
+class _NativeUnicodeMixin(object):
+ if util.py2k:
+ def bind_processor(self, dialect):
+ if dialect._cx_oracle_with_unicode:
+ def process(value):
+ if value is None:
+ return value
+ else:
+ return unicode(value)
+ return process
+ else:
+ return super(
+ _NativeUnicodeMixin, self).bind_processor(dialect)
+
+ # we apply a connection output handler that returns
+ # unicode in all cases, so the "native_unicode" flag
+ # will be set for the default String.result_processor.
+
+
+class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
+ def get_dbapi_type(self, dbapi):
+ return dbapi.FIXED_CHAR
+
+
+class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR):
+ def get_dbapi_type(self, dbapi):
+ return getattr(dbapi, 'UNICODE', dbapi.STRING)
+
+
+class _OracleText(_LOBMixin, sqltypes.Text):
+ def get_dbapi_type(self, dbapi):
+ return dbapi.CLOB
+
+
+class _OracleLong(oracle.LONG):
+ # a raw LONG is a text type, but does *not*
+ # get the LobMixin with cx_oracle.
+
+ def get_dbapi_type(self, dbapi):
+ return dbapi.LONG_STRING
+
+
+class _OracleString(_NativeUnicodeMixin, sqltypes.String):
+ pass
+
+
+class _OracleEnum(_NativeUnicodeMixin, sqltypes.Enum):
+ def bind_processor(self, dialect):
+ enum_proc = sqltypes.Enum.bind_processor(self, dialect)
+ if util.py2k:
+ unicode_proc = _NativeUnicodeMixin.bind_processor(self, dialect)
+ else:
+ unicode_proc = None
+
+ def process(value):
+ raw_str = enum_proc(value)
+ if unicode_proc:
+ raw_str = unicode_proc(raw_str)
+ return raw_str
+ return process
+
+
+class _OracleUnicodeText(
+ _LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
+ def get_dbapi_type(self, dbapi):
+ return dbapi.NCLOB
+
+ def result_processor(self, dialect, coltype):
+ lob_processor = _LOBMixin.result_processor(self, dialect, coltype)
+ if lob_processor is None:
+ return None
+
+ string_processor = sqltypes.UnicodeText.result_processor(
+ self, dialect, coltype)
+
+ if string_processor is None:
+ return lob_processor
+ else:
+ def process(value):
+ return string_processor(lob_processor(value))
+ return process
+
+
+class _OracleInteger(sqltypes.Integer):
+ def result_processor(self, dialect, coltype):
+ def to_int(val):
+ if val is not None:
+ val = int(val)
+ return val
+ return to_int
+
+
+class _OracleBinary(_LOBMixin, sqltypes.LargeBinary):
+ def get_dbapi_type(self, dbapi):
+ return dbapi.BLOB
+
+ def bind_processor(self, dialect):
+ return None
+
+
+class _OracleInterval(oracle.INTERVAL):
+ def get_dbapi_type(self, dbapi):
+ return dbapi.INTERVAL
+
+
+class _OracleRaw(oracle.RAW):
+ pass
+
+
+class _OracleRowid(oracle.ROWID):
+ def get_dbapi_type(self, dbapi):
+ return dbapi.ROWID
+
+
+class OracleCompiler_cx_oracle(OracleCompiler):
+ def bindparam_string(self, name, **kw):
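+        # bind parameter names that require quoting (e.g. reserved words
+        # or case-sensitive names) are recorded in _quoted_bind_names so
+        # that pre_exec() can rewrite the parameter dictionaries to match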
+ quote = getattr(name, 'quote', None)
+ if quote is True or quote is not False and \
+ self.preparer._bindparam_requires_quotes(name):
+ quoted_name = '"%s"' % name
+ self._quoted_bind_names[name] = quoted_name
+ return OracleCompiler.bindparam_string(self, quoted_name, **kw)
+ else:
+ return OracleCompiler.bindparam_string(self, name, **kw)
+
+
+class OracleExecutionContext_cx_oracle(OracleExecutionContext):
+
+ def pre_exec(self):
+ quoted_bind_names = \
+ getattr(self.compiled, '_quoted_bind_names', None)
+ if quoted_bind_names:
+ if not self.dialect.supports_unicode_statements:
+ # if DBAPI doesn't accept unicode statements,
+ # keys in self.parameters would have been encoded
+ # here. so convert names in quoted_bind_names
+ # to encoded as well.
+ quoted_bind_names = \
+ dict(
+ (fromname.encode(self.dialect.encoding),
+ toname.encode(self.dialect.encoding))
+ for fromname, toname in
+ quoted_bind_names.items()
+ )
+ for param in self.parameters:
+ for fromname, toname in quoted_bind_names.items():
+ param[toname] = param[fromname]
+ del param[fromname]
+
+ if self.dialect.auto_setinputsizes:
+ # cx_oracle really has issues when you setinputsizes
+ # on String, including that outparams/RETURNING
+ # breaks for varchars
+ self.set_input_sizes(
+ quoted_bind_names,
+ exclude_types=self.dialect.exclude_setinputsizes
+ )
+
+ # if a single execute, check for outparams
+ if len(self.compiled_parameters) == 1:
+ for bindparam in self.compiled.binds.values():
+ if bindparam.isoutparam:
+ dbtype = bindparam.type.dialect_impl(self.dialect).\
+ get_dbapi_type(self.dialect.dbapi)
+ if not hasattr(self, 'out_parameters'):
+ self.out_parameters = {}
+ if dbtype is None:
+ raise exc.InvalidRequestError(
+ "Cannot create out parameter for parameter "
+ "%r - its type %r is not supported by"
+ " cx_oracle" %
+ (bindparam.key, bindparam.type)
+ )
+ name = self.compiled.bind_names[bindparam]
+ self.out_parameters[name] = self.cursor.var(dbtype)
+ self.parameters[0][quoted_bind_names.get(name, name)] = \
+ self.out_parameters[name]
+
+ def create_cursor(self):
+ c = self._dbapi_connection.cursor()
+ if self.dialect.arraysize:
+ c.arraysize = self.dialect.arraysize
+
+ return c
+
+ def get_result_proxy(self):
+ if hasattr(self, 'out_parameters') and self.compiled.returning:
+ returning_params = dict(
+ (k, v.getvalue())
+ for k, v in self.out_parameters.items()
+ )
+ return ReturningResultProxy(self, returning_params)
+
+ result = None
+ if self.cursor.description is not None:
+ for column in self.cursor.description:
+ type_code = column[1]
+ if type_code in self.dialect._cx_oracle_binary_types:
+ result = _result.BufferedColumnResultProxy(self)
+
+ if result is None:
+ result = _result.ResultProxy(self)
+
+ if hasattr(self, 'out_parameters'):
+ if self.compiled_parameters is not None and \
+ len(self.compiled_parameters) == 1:
+ result.out_parameters = out_parameters = {}
+
+ for bind, name in self.compiled.bind_names.items():
+ if name in self.out_parameters:
+ type = bind.type
+ impl_type = type.dialect_impl(self.dialect)
+ dbapi_type = impl_type.get_dbapi_type(
+ self.dialect.dbapi)
+ result_processor = impl_type.\
+ result_processor(self.dialect,
+ dbapi_type)
+ if result_processor is not None:
+ out_parameters[name] = \
+ result_processor(
+ self.out_parameters[name].getvalue())
+ else:
+ out_parameters[name] = self.out_parameters[
+ name].getvalue()
+ else:
+ result.out_parameters = dict(
+ (k, v.getvalue())
+ for k, v in self.out_parameters.items()
+ )
+
+ return result
+
+
+class OracleExecutionContext_cx_oracle_with_unicode(
+ OracleExecutionContext_cx_oracle):
+    """Support WITH_UNICODE in Python 2.x.
+
+    WITH_UNICODE enables cx_Oracle's Python 3 unicode handling
+    behavior under Python 2.x. This mode in some cases disallows
+ and in other cases silently passes corrupted data when
+ non-Python-unicode strings (a.k.a. plain old Python strings)
+ are passed as arguments to connect(), the statement sent to execute(),
+ or any of the bind parameter keys or values sent to execute().
+ This optional context therefore ensures that all statements are
+ passed as Python unicode objects.
+
+ """
+
+ def __init__(self, *arg, **kw):
+ OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
+ self.statement = util.text_type(self.statement)
+
+ def _execute_scalar(self, stmt, type_):
+ return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
+ _execute_scalar(util.text_type(stmt), type_)
+
+
+class ReturningResultProxy(_result.FullyBufferedResultProxy):
+ """Result proxy which stuffs the _returning clause + outparams
+ into the fetch."""
+
+ def __init__(self, context, returning_params):
+ self._returning_params = returning_params
+ super(ReturningResultProxy, self).__init__(context)
+
+ def _cursor_description(self):
+ returning = self.context.compiled.returning
+ return [
+ ("ret_%d" % i, None)
+ for i, col in enumerate(returning)
+ ]
+
+ def _buffer_rows(self):
+ return collections.deque(
+ [tuple(self._returning_params["ret_%d" % i]
+ for i, c in enumerate(self._returning_params))]
+ )
+
+
+class OracleDialect_cx_oracle(OracleDialect):
+ execution_ctx_cls = OracleExecutionContext_cx_oracle
+ statement_compiler = OracleCompiler_cx_oracle
+
+ driver = "cx_oracle"
+
+    colspecs = {
+ sqltypes.Numeric: _OracleNumeric,
+ # generic type, assume datetime.date is desired
+ sqltypes.Date: _OracleDate,
+ sqltypes.LargeBinary: _OracleBinary,
+ sqltypes.Boolean: oracle._OracleBoolean,
+ sqltypes.Interval: _OracleInterval,
+ oracle.INTERVAL: _OracleInterval,
+ sqltypes.Text: _OracleText,
+ sqltypes.String: _OracleString,
+ sqltypes.UnicodeText: _OracleUnicodeText,
+ sqltypes.CHAR: _OracleChar,
+ sqltypes.Enum: _OracleEnum,
+
+ # a raw LONG is a text type, but does *not*
+ # get the LobMixin with cx_oracle.
+ oracle.LONG: _OracleLong,
+
+ # this is only needed for OUT parameters.
+ # it would be nice if we could not use it otherwise.
+ sqltypes.Integer: _OracleInteger,
+
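+        A hedged usage sketch via the inspection interface; the table name
+        is a placeholder::
+
+            from sqlalchemy import create_engine, inspect
+
+            engine = create_engine("oracle+cx_oracle://scott:tiger@dsn")
+            insp = inspect(engine)
+            cols = insp.get_columns("mytable", oracle_resolve_synonyms=True)
+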
+ oracle.RAW: _OracleRaw,
+ sqltypes.Unicode: _OracleNVarChar,
+ sqltypes.NVARCHAR: _OracleNVarChar,
+ oracle.ROWID: _OracleRowid,
+ }
+
+ execute_sequence_format = list
+
+ def __init__(self,
+ auto_setinputsizes=True,
+ exclude_setinputsizes=("STRING", "UNICODE"),
+ auto_convert_lobs=True,
+ threaded=True,
+ allow_twophase=True,
+ coerce_to_decimal=True,
+ coerce_to_unicode=False,
+ arraysize=50, _retry_on_12516=False,
+ **kwargs):
+ OracleDialect.__init__(self, **kwargs)
+ self.threaded = threaded
+ self.arraysize = arraysize
+ self.allow_twophase = allow_twophase
+ self.supports_timestamp = self.dbapi is None or \
+ hasattr(self.dbapi, 'TIMESTAMP')
+ self.auto_setinputsizes = auto_setinputsizes
+ self.auto_convert_lobs = auto_convert_lobs
+ self._retry_on_12516 = _retry_on_12516
+
+ if hasattr(self.dbapi, 'version'):
+ self.cx_oracle_ver = tuple([int(x) for x in
+ self.dbapi.version.split('.')])
+ else:
+ self.cx_oracle_ver = (0, 0, 0)
+
+ def types(*names):
+ return set(
+ getattr(self.dbapi, name, None) for name in names
+ ).difference([None])
+
+ self.exclude_setinputsizes = types(*(exclude_setinputsizes or ()))
+ self._cx_oracle_string_types = types("STRING", "UNICODE",
+ "NCLOB", "CLOB")
+ self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
+ self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
+ self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
+
+ self.coerce_to_unicode = (
+ self.cx_oracle_ver >= (5, 0) and
+ coerce_to_unicode
+ )
+
+ self.supports_native_decimal = (
+ self.cx_oracle_ver >= (5, 0) and
+ coerce_to_decimal
+ )
+
+ self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)
+
+ if self.cx_oracle_ver is None:
+ # this occurs in tests with mock DBAPIs
+ self._cx_oracle_string_types = set()
+ self._cx_oracle_with_unicode = False
+        elif util.py3k or (
+                self.cx_oracle_ver >= (5,) and
+                not hasattr(self.dbapi, 'UNICODE')
+        ):
+ # cx_Oracle WITH_UNICODE mode. *only* python
+ # unicode objects accepted for anything
+ self.supports_unicode_statements = True
+ self.supports_unicode_binds = True
+ self._cx_oracle_with_unicode = True
+
+ if util.py2k:
+ # There's really no reason to run with WITH_UNICODE under
+ # Python 2.x. However as of cx_oracle 5.3 it seems to be
+ # set to ON for default builds
+ self.execution_ctx_cls = \
+ OracleExecutionContext_cx_oracle_with_unicode
+ else:
+ self._cx_oracle_with_unicode = False
+
+ if self.cx_oracle_ver is None or \
+ not self.auto_convert_lobs or \
+ not hasattr(self.dbapi, 'CLOB'):
+ self.dbapi_type_map = {}
+ else:
+ # only use this for LOB objects. using it for strings, dates
+ # etc. leads to a little too much magic, reflection doesn't know
+ # if it should expect encoded strings or unicodes, etc.
+ self.dbapi_type_map = {
+ self.dbapi.CLOB: oracle.CLOB(),
+ self.dbapi.NCLOB: oracle.NCLOB(),
+ self.dbapi.BLOB: oracle.BLOB(),
+ self.dbapi.BINARY: oracle.RAW(),
+ }
+
+ @classmethod
+ def dbapi(cls):
+ import cx_Oracle
+ return cx_Oracle
+
+ def connect(self, *cargs, **cparams):
+ if self._retry_on_12516:
+ # emergency flag for the SQLAlchemy test suite, which has
+ # decreased in stability since cx_oracle 5.3; generalized
+ # "retry on connect" functionality is part of an upcoming
+ # SQLAlchemy feature
+ try:
+ return self.dbapi.connect(*cargs, **cparams)
+ except self.dbapi.DatabaseError as err:
+ if "ORA-12516" in str(err):
+ time.sleep(2)
+ return self.dbapi.connect(*cargs, **cparams)
+ else:
+ raise
+ else:
+ return super(OracleDialect_cx_oracle, self).connect(
+ *cargs, **cparams)
+
+ def initialize(self, connection):
+ super(OracleDialect_cx_oracle, self).initialize(connection)
+ if self._is_oracle_8:
+ self.supports_unicode_binds = False
+ self._detect_decimal_char(connection)
+
+ def _detect_decimal_char(self, connection):
+ """detect if the decimal separator character is not '.', as
+ is the case with European locale settings for NLS_LANG.
+
+ cx_oracle itself uses similar logic when it formats Python
+ Decimal objects to strings on the bind side (as of 5.0.3),
+ as Oracle sends/receives string numerics only in the
+ current locale.
+
+ """
+ if self.cx_oracle_ver < (5,):
+ # no output type handlers before version 5
+ return
+
+ cx_Oracle = self.dbapi
+ conn = connection.connection
+
+ # override the output_type_handler that's
+ # on the cx_oracle connection with a plain
+ # one on the cursor
+
+ def output_type_handler(cursor, name, defaultType,
+ size, precision, scale):
+ return cursor.var(
+ cx_Oracle.STRING,
+ 255, arraysize=cursor.arraysize)
+
+ cursor = conn.cursor()
+ cursor.outputtypehandler = output_type_handler
+ cursor.execute("SELECT 0.1 FROM DUAL")
+ val = cursor.fetchone()[0]
+ cursor.close()
+ char = re.match(r"([\.,])", val).group(1)
+ if char != '.':
+ _detect_decimal = self._detect_decimal
+ self._detect_decimal = \
+ lambda value: _detect_decimal(value.replace(char, '.'))
+ self._to_decimal = \
+ lambda value: decimal.Decimal(value.replace(char, '.'))
+
+ def _detect_decimal(self, value):
+ if "." in value:
+ return decimal.Decimal(value)
+ else:
+ return int(value)
+
+ _to_decimal = decimal.Decimal
+
+ def on_connect(self):
+ if self.cx_oracle_ver < (5,):
+ # no output type handlers before version 5
+ return
+
+ cx_Oracle = self.dbapi
+
+ def output_type_handler(cursor, name, defaultType,
+ size, precision, scale):
+ # convert all NUMBER with precision + positive scale to Decimal
+ # this almost allows "native decimal" mode.
+ if self.supports_native_decimal and \
+ defaultType == cx_Oracle.NUMBER and \
+ precision and scale > 0:
+ return cursor.var(
+ cx_Oracle.STRING,
+ 255,
+ outconverter=self._to_decimal,
+ arraysize=cursor.arraysize)
+ # if NUMBER with zero precision and 0 or neg scale, this appears
+ # to indicate "ambiguous". Use a slower converter that will
+ # make a decision based on each value received - the type
+ # may change from row to row (!). This kills
+ # off "native decimal" mode, handlers still needed.
+ elif self.supports_native_decimal and \
+ defaultType == cx_Oracle.NUMBER \
+ and not precision and scale <= 0:
+ return cursor.var(
+ cx_Oracle.STRING,
+ 255,
+ outconverter=self._detect_decimal,
+ arraysize=cursor.arraysize)
+ # allow all strings to come back natively as Unicode
+ elif self.coerce_to_unicode and \
+ defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
+ return cursor.var(util.text_type, size, cursor.arraysize)
+
+ def on_connect(conn):
+ conn.outputtypehandler = output_type_handler
+
+ return on_connect
+
+ def create_connect_args(self, url):
+ dialect_opts = dict(url.query)
+ for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
+ 'threaded', 'allow_twophase'):
+ if opt in dialect_opts:
+ util.coerce_kw_type(dialect_opts, opt, bool)
+ setattr(self, opt, dialect_opts[opt])
+
+ database = url.database
+ service_name = dialect_opts.get('service_name', None)
+ if database or service_name:
+ # if we have a database, then we have a remote host
+ port = url.port
+ if port:
+ port = int(port)
+ else:
+ port = 1521
+
+ if database and service_name:
+ raise exc.InvalidRequestError(
+ '"service_name" option shouldn\'t '
+ 'be used with a "database" part of the url')
+ if database:
+ makedsn_kwargs = {'sid': database}
+ if service_name:
+ makedsn_kwargs = {'service_name': service_name}
+
+ dsn = self.dbapi.makedsn(url.host, port, **makedsn_kwargs)
+ else:
+ # we have a local tnsname
+ dsn = url.host
+
+ opts = dict(
+ threaded=self.threaded,
+ twophase=self.allow_twophase,
+ )
+
+ if dsn is not None:
+ opts['dsn'] = dsn
+ if url.password is not None:
+ opts['password'] = url.password
+ if url.username is not None:
+ opts['user'] = url.username
+
+ if util.py2k:
+ if self._cx_oracle_with_unicode:
+ for k, v in opts.items():
+ if isinstance(v, str):
+ opts[k] = unicode(v)
+ else:
+ for k, v in opts.items():
+ if isinstance(v, unicode):
+ opts[k] = str(v)
+
+ if 'mode' in url.query:
+ opts['mode'] = url.query['mode']
+ if isinstance(opts['mode'], util.string_types):
+ mode = opts['mode'].upper()
+ if mode == 'SYSDBA':
+ opts['mode'] = self.dbapi.SYSDBA
+ elif mode == 'SYSOPER':
+ opts['mode'] = self.dbapi.SYSOPER
+ else:
+ util.coerce_kw_type(opts, 'mode', int)
+ return ([], opts)
+
+ def _get_server_version_info(self, connection):
+ return tuple(
+ int(x)
+ for x in connection.connection.version.split('.')
+ )
+
+ def is_disconnect(self, e, connection, cursor):
+ error, = e.args
+ if isinstance(e, self.dbapi.InterfaceError):
+ return "not connected" in str(e)
+ elif hasattr(error, 'code'):
+ # ORA-00028: your session has been killed
+ # ORA-03114: not connected to ORACLE
+ # ORA-03113: end-of-file on communication channel
+ # ORA-03135: connection lost contact
+ # ORA-01033: ORACLE initialization or shutdown in progress
+ # ORA-02396: exceeded maximum idle time, please connect again
+ # TODO: Others ?
+ return error.code in (28, 3114, 3113, 3135, 1033, 2396)
+ else:
+ return False
+
+ def create_xid(self):
+ """create a two-phase transaction ID.
+
+ this id will be passed to do_begin_twophase(), do_rollback_twophase(),
+ do_commit_twophase(). its format is unspecified."""
+
+ id = random.randint(0, 2 ** 128)
+ return (0x1234, "%032x" % id, "%032x" % 9)
+
+ def do_executemany(self, cursor, statement, parameters, context=None):
+ if isinstance(parameters, tuple):
+ parameters = list(parameters)
+ cursor.executemany(statement, parameters)
+
+ def do_begin_twophase(self, connection, xid):
+ connection.connection.begin(*xid)
+
+ def do_prepare_twophase(self, connection, xid):
+ result = connection.connection.prepare()
+ connection.info['cx_oracle_prepared'] = result
+
+ def do_rollback_twophase(self, connection, xid, is_prepared=True,
+ recover=False):
+ self.do_rollback(connection.connection)
+
+ def do_commit_twophase(self, connection, xid, is_prepared=True,
+ recover=False):
+ if not is_prepared:
+ self.do_commit(connection.connection)
+ else:
+ oci_prepared = connection.info['cx_oracle_prepared']
+ if oci_prepared:
+ self.do_commit(connection.connection)
+
+ def do_recover_twophase(self, connection):
+ connection.info.pop('cx_oracle_prepared', None)
+
+dialect = OracleDialect_cx_oracle
diff --git a/app/lib/sqlalchemy/dialects/oracle/zxjdbc.py b/app/lib/sqlalchemy/dialects/oracle/zxjdbc.py
new file mode 100644
index 0000000..c8a31f1
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/oracle/zxjdbc.py
@@ -0,0 +1,235 @@
+# oracle/zxjdbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: oracle+zxjdbc
+ :name: zxJDBC for Jython
+ :dbapi: zxjdbc
+ :connectstring: oracle+zxjdbc://user:pass@host/dbname
+ :driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
+
+ .. note:: Jython is not supported by current versions of SQLAlchemy. The
+ zxjdbc dialect should be considered as experimental.
+
+"""
+import decimal
+import re
+
+from sqlalchemy import sql, types as sqltypes, util
+from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
+from sqlalchemy.dialects.oracle.base import (OracleCompiler,
+ OracleDialect,
+ OracleExecutionContext)
+from sqlalchemy.engine import result as _result
+from sqlalchemy.sql import expression
+import collections
+
+SQLException = zxJDBC = None
+
+
+class _ZxJDBCDate(sqltypes.Date):
+
+ def result_processor(self, dialect, coltype):
+ def process(value):
+ if value is None:
+ return None
+ else:
+ return value.date()
+ return process
+
+
+class _ZxJDBCNumeric(sqltypes.Numeric):
+
+ def result_processor(self, dialect, coltype):
+ # XXX: does the dialect return Decimal or not???
+ # if it does (in all cases), we could use a None processor as well as
+ # the to_float generic processor
+ if self.asdecimal:
+ def process(value):
+ if isinstance(value, decimal.Decimal):
+ return value
+ else:
+ return decimal.Decimal(str(value))
+ else:
+ def process(value):
+ if isinstance(value, decimal.Decimal):
+ return float(value)
+ else:
+ return value
+ return process
+
+
+class OracleCompiler_zxjdbc(OracleCompiler):
+
+ def returning_clause(self, stmt, returning_cols):
+ self.returning_cols = list(
+ expression._select_iterables(returning_cols))
+
+ # within_columns_clause=False so that labels (foo AS bar) don't render
+ columns = [self.process(c, within_columns_clause=False)
+ for c in self.returning_cols]
+
+ if not hasattr(self, 'returning_parameters'):
+ self.returning_parameters = []
+
+ binds = []
+ for i, col in enumerate(self.returning_cols):
+ dbtype = col.type.dialect_impl(
+ self.dialect).get_dbapi_type(self.dialect.dbapi)
+ self.returning_parameters.append((i + 1, dbtype))
+
+ bindparam = sql.bindparam(
+ "ret_%d" % i, value=ReturningParam(dbtype))
+ self.binds[bindparam.key] = bindparam
+ binds.append(
+ self.bindparam_string(self._truncate_bindparam(bindparam)))
+
+ return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
+
+
+class OracleExecutionContext_zxjdbc(OracleExecutionContext):
+
+ def pre_exec(self):
+ if hasattr(self.compiled, 'returning_parameters'):
+ # prepare a zxJDBC statement so we can grab its underlying
+ # OraclePreparedStatement's getReturnResultSet later
+ self.statement = self.cursor.prepare(self.statement)
+
+ def get_result_proxy(self):
+ if hasattr(self.compiled, 'returning_parameters'):
+ rrs = None
+ try:
+ try:
+ rrs = self.statement.__statement__.getReturnResultSet()
+ next(rrs)
+ except SQLException as sqle:
+ msg = '%s [SQLCode: %d]' % (
+ sqle.getMessage(), sqle.getErrorCode())
+ if sqle.getSQLState() is not None:
+ msg += ' [SQLState: %s]' % sqle.getSQLState()
+ raise zxJDBC.Error(msg)
+ else:
+ row = tuple(
+ self.cursor.datahandler.getPyObject(
+ rrs, index, dbtype)
+ for index, dbtype in
+ self.compiled.returning_parameters)
+ return ReturningResultProxy(self, row)
+ finally:
+ if rrs is not None:
+ try:
+ rrs.close()
+ except SQLException:
+ pass
+ self.statement.close()
+
+ return _result.ResultProxy(self)
+
+ def create_cursor(self):
+ cursor = self._dbapi_connection.cursor()
+ cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
+ return cursor
+
+
+class ReturningResultProxy(_result.FullyBufferedResultProxy):
+
+ """ResultProxy backed by the RETURNING ResultSet results."""
+
+ def __init__(self, context, returning_row):
+ self._returning_row = returning_row
+ super(ReturningResultProxy, self).__init__(context)
+
+ def _cursor_description(self):
+ ret = []
+ for c in self.context.compiled.returning_cols:
+ if hasattr(c, 'name'):
+ ret.append((c.name, c.type))
+ else:
+ ret.append((c.anon_label, c.type))
+ return ret
+
+ def _buffer_rows(self):
+ return collections.deque([self._returning_row])
+
+
+class ReturningParam(object):
+
+ """A bindparam value representing a RETURNING parameter.
+
+ Specially handled by OracleReturningDataHandler.
+ """
+
+ def __init__(self, type):
+ self.type = type
+
+ def __eq__(self, other):
+ if isinstance(other, ReturningParam):
+ return self.type == other.type
+ return NotImplemented
+
+ def __ne__(self, other):
+ if isinstance(other, ReturningParam):
+ return self.type != other.type
+ return NotImplemented
+
+ def __repr__(self):
+ kls = self.__class__
+ return '<%s.%s object at 0x%x type=%s>' % (
+ kls.__module__, kls.__name__, id(self), self.type)
+
+
+class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
+ jdbc_db_name = 'oracle'
+ jdbc_driver_name = 'oracle.jdbc.OracleDriver'
+
+ statement_compiler = OracleCompiler_zxjdbc
+ execution_ctx_cls = OracleExecutionContext_zxjdbc
+
+ colspecs = util.update_copy(
+ OracleDialect.colspecs,
+ {
+ sqltypes.Date: _ZxJDBCDate,
+ sqltypes.Numeric: _ZxJDBCNumeric
+ }
+ )
+
+ def __init__(self, *args, **kwargs):
+ super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
+ global SQLException, zxJDBC
+ from java.sql import SQLException
+ from com.ziclix.python.sql import zxJDBC
+ from com.ziclix.python.sql.handler import OracleDataHandler
+
+ class OracleReturningDataHandler(OracleDataHandler):
+ """zxJDBC DataHandler that specially handles ReturningParam."""
+
+ def setJDBCObject(self, statement, index, object, dbtype=None):
+ if type(object) is ReturningParam:
+ statement.registerReturnParameter(index, object.type)
+ elif dbtype is None:
+ OracleDataHandler.setJDBCObject(
+ self, statement, index, object)
+ else:
+ OracleDataHandler.setJDBCObject(
+ self, statement, index, object, dbtype)
+ self.DataHandler = OracleReturningDataHandler
+
+ def initialize(self, connection):
+ super(OracleDialect_zxjdbc, self).initialize(connection)
+ self.implicit_returning = \
+ connection.connection.driverversion >= '10.2'
+
+ def _create_jdbc_url(self, url):
+ return 'jdbc:oracle:thin:@%s:%s:%s' % (
+ url.host, url.port or 1521, url.database)
+
+ def _get_server_version_info(self, connection):
+ version = re.search(
+ r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
+ return tuple(int(x) for x in version.split('.'))
+
+dialect = OracleDialect_zxjdbc
diff --git a/app/lib/sqlalchemy/dialects/postgresql/__init__.py b/app/lib/sqlalchemy/dialects/postgresql/__init__.py
new file mode 100644
index 0000000..a6872cf
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/__init__.py
@@ -0,0 +1,36 @@
+# postgresql/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from . import base, psycopg2, pg8000, pypostgresql, pygresql, \
+ zxjdbc, psycopg2cffi
+
+base.dialect = psycopg2.dialect
+
+from .base import \
+ INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
+ INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \
+ DATE, BYTEA, BOOLEAN, INTERVAL, ENUM, dialect, TSVECTOR, DropEnumType, \
+ CreateEnumType
+from .hstore import HSTORE, hstore
+from .json import JSON, JSONB
+from .array import array, ARRAY, Any, All
+from .ext import aggregate_order_by, ExcludeConstraint, array_agg
+from .dml import insert, Insert
+
+from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
+ TSTZRANGE
+
+__all__ = (
+ 'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
+ 'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'OID',
+ 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
+ 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'array', 'HSTORE',
+ 'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
+ 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'Any', 'All',
+ 'DropEnumType', 'CreateEnumType', 'ExcludeConstraint',
+ 'aggregate_order_by', 'array_agg', 'insert', 'Insert'
+)
diff --git a/app/lib/sqlalchemy/dialects/postgresql/array.py b/app/lib/sqlalchemy/dialects/postgresql/array.py
new file mode 100644
index 0000000..98cab95
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/array.py
@@ -0,0 +1,314 @@
+# postgresql/array.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from .base import ischema_names
+from ...sql import expression, operators
+from ...sql.base import SchemaEventTarget
+from ... import types as sqltypes
+
+
+
+def Any(other, arrexpr, operator=operators.eq):
+ """A synonym for the :meth:`.ARRAY.Comparator.any` method.
+
+ This method is legacy and is here for backwards-compatibility.
+
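+    A hedged usage sketch; ``mytable.c.data`` is assumed to be an
+    :class:`.ARRAY` column::
+
+        expr = Any(7, mytable.c.data)  # same as mytable.c.data.any(7)
+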
+ .. seealso::
+
+ :func:`.expression.any_`
+
+ """
+
+ return arrexpr.any(other, operator)
+
+
+def All(other, arrexpr, operator=operators.eq):
+ """A synonym for the :meth:`.ARRAY.Comparator.all` method.
+
+ This method is legacy and is here for backwards-compatibility.
+
+ .. seealso::
+
+ :func:`.expression.all_`
+
+ """
+
+ return arrexpr.all(other, operator)
+
+
+class array(expression.Tuple):
+
+ """A PostgreSQL ARRAY literal.
+
+ This is used to produce ARRAY literals in SQL expressions, e.g.::
+
+ from sqlalchemy.dialects.postgresql import array
+ from sqlalchemy.dialects import postgresql
+ from sqlalchemy import select, func
+
+ stmt = select([
+ array([1,2]) + array([3,4,5])
+ ])
+
+ print stmt.compile(dialect=postgresql.dialect())
+
+ Produces the SQL::
+
+ SELECT ARRAY[%(param_1)s, %(param_2)s] ||
+            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
+
+ An instance of :class:`.array` will always have the datatype
+ :class:`.ARRAY`. The "inner" type of the array is inferred from
+ the values present, unless the ``type_`` keyword argument is passed::
+
+ array(['foo', 'bar'], type_=CHAR)
+
+ .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
+
+ See also:
+
+ :class:`.postgresql.ARRAY`
+
+ """
+ __visit_name__ = 'array'
+
+ def __init__(self, clauses, **kw):
+ super(array, self).__init__(*clauses, **kw)
+ self.type = ARRAY(self.type)
+
+ def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
+ if _assume_scalar or operator is operators.getitem:
+ # if getitem->slice were called, Indexable produces
+ # a Slice object from that
+ assert isinstance(obj, int)
+ return expression.BindParameter(
+ None, obj, _compared_to_operator=operator,
+ type_=type_,
+ _compared_to_type=self.type, unique=True)
+
+ else:
+ return array([
+ self._bind_param(operator, o, _assume_scalar=True, type_=type_)
+ for o in obj])
+
+ def self_group(self, against=None):
+ if (against in (
+ operators.any_op, operators.all_op, operators.getitem)):
+ return expression.Grouping(self)
+ else:
+ return self
+
+
+CONTAINS = operators.custom_op("@>", precedence=5)
+
+CONTAINED_BY = operators.custom_op("<@", precedence=5)
+
+OVERLAP = operators.custom_op("&&", precedence=5)
+
+
+class ARRAY(SchemaEventTarget, sqltypes.ARRAY):
+
+ """PostgreSQL ARRAY type.
+
+ .. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
+ a subclass of the core :class:`.types.ARRAY` type.
+
+ The :class:`.postgresql.ARRAY` type is constructed in the same way
+ as the core :class:`.types.ARRAY` type; a member type is required, and a
+ number of dimensions is recommended if the type is to be used for more
+ than one dimension::
+
+ from sqlalchemy.dialects import postgresql
+
+ mytable = Table("mytable", metadata,
+ Column("data", postgresql.ARRAY(Integer, dimensions=2))
+ )
+
+ The :class:`.postgresql.ARRAY` type provides all operations defined on the
+ core :class:`.types.ARRAY` type, including support for "dimensions", indexed
+ access, and simple matching such as :meth:`.types.ARRAY.Comparator.any`
+    and :meth:`.types.ARRAY.Comparator.all`. The :class:`.postgresql.ARRAY`
+    class also provides PostgreSQL-specific methods for containment
+    operations, including :meth:`.postgresql.ARRAY.Comparator.contains`,
+ :meth:`.postgresql.ARRAY.Comparator.contained_by`,
+ and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::
+
+ mytable.c.data.contains([1, 2])
+
+ The :class:`.postgresql.ARRAY` type may not be supported on all
+ PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.
+
+ Additionally, the :class:`.postgresql.ARRAY` type does not work directly in
+ conjunction with the :class:`.ENUM` type. For a workaround, see the
+ special type at :ref:`postgresql_array_of_enum`.
+
+ .. seealso::
+
+ :class:`.types.ARRAY` - base array type
+
+ :class:`.postgresql.array` - produces a literal array value.
+
+ """
+
+ class Comparator(sqltypes.ARRAY.Comparator):
+
+ """Define comparison operations for :class:`.ARRAY`.
+
+ Note that these operations are in addition to those provided
+ by the base :class:`.types.ARRAY.Comparator` class, including
+ :meth:`.types.ARRAY.Comparator.any` and
+ :meth:`.types.ARRAY.Comparator.all`.
+
+ """
+
+ def contains(self, other, **kwargs):
+ """Boolean expression. Test if elements are a superset of the
+ elements of the argument array expression.
+ """
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+ def contained_by(self, other):
+ """Boolean expression. Test if elements are a proper subset of the
+ elements of the argument array expression.
+ """
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
+
+ def overlap(self, other):
+ """Boolean expression. Test if array has elements in common with
+ an argument array expression.
+ """
+ return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
+
+ comparator_factory = Comparator
+
+ def __init__(self, item_type, as_tuple=False, dimensions=None,
+ zero_indexes=False):
+ """Construct an ARRAY.
+
+ E.g.::
+
+ Column('myarray', ARRAY(Integer))
+
+ Arguments are:
+
+ :param item_type: The data type of items of this array. Note that
+ dimensionality is irrelevant here, so multi-dimensional arrays like
+          ``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
+ ``ARRAY(ARRAY(Integer))`` or such.
+
+ :param as_tuple=False: Specify whether return results
+ should be converted to tuples from lists. DBAPIs such
+ as psycopg2 return lists by default. When tuples are
+ returned, the results are hashable.
+
+ :param dimensions: if non-None, the ARRAY will assume a fixed
+ number of dimensions. This will cause the DDL emitted for this
+ ARRAY to include the exact number of bracket clauses ``[]``,
+ and will also optimize the performance of the type overall.
+ Note that PG arrays are always implicitly "non-dimensioned",
+ meaning they can store any number of dimensions no matter how
+ they were declared.
+
+ :param zero_indexes=False: when True, index values will be converted
+ between Python zero-based and PostgreSQL one-based indexes, e.g.
+ a value of one will be added to all index values before passing
+ to the database.
+
+ .. versionadded:: 0.9.5
+
+
+ """
+ if isinstance(item_type, ARRAY):
+ raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
+ "handles multi-dimensional arrays of basetype")
+ if isinstance(item_type, type):
+ item_type = item_type()
+ self.item_type = item_type
+ self.as_tuple = as_tuple
+ self.dimensions = dimensions
+ self.zero_indexes = zero_indexes
+
+ @property
+ def hashable(self):
+ return self.as_tuple
+
+ @property
+ def python_type(self):
+ return list
+
+ def compare_values(self, x, y):
+ return x == y
+
+ def _set_parent(self, column):
+ """Support SchemaEventTarget"""
+
+ if isinstance(self.item_type, SchemaEventTarget):
+ self.item_type._set_parent(column)
+
+ def _set_parent_with_dispatch(self, parent):
+ """Support SchemaEventTarget"""
+
+ if isinstance(self.item_type, SchemaEventTarget):
+ self.item_type._set_parent_with_dispatch(parent)
+
+ def _proc_array(self, arr, itemproc, dim, collection):
+ if dim is None:
+ arr = list(arr)
+ if dim == 1 or dim is None and (
+ # this has to be (list, tuple), or at least
+ # not hasattr('__iter__'), since Py3K strings
+ # etc. have __iter__
+ not arr or not isinstance(arr[0], (list, tuple))):
+ if itemproc:
+ return collection(itemproc(x) for x in arr)
+ else:
+ return collection(arr)
+ else:
+ return collection(
+ self._proc_array(
+ x, itemproc,
+ dim - 1 if dim is not None else None,
+ collection)
+ for x in arr
+ )
+
+ def bind_processor(self, dialect):
+ item_proc = self.item_type.dialect_impl(dialect).\
+ bind_processor(dialect)
+
+ def process(value):
+ if value is None:
+ return value
+ else:
+ return self._proc_array(
+ value,
+ item_proc,
+ self.dimensions,
+ list)
+ return process
+
+ def result_processor(self, dialect, coltype):
+ item_proc = self.item_type.dialect_impl(dialect).\
+ result_processor(dialect, coltype)
+
+ def process(value):
+ if value is None:
+ return value
+ else:
+ return self._proc_array(
+ value,
+ item_proc,
+ self.dimensions,
+ tuple if self.as_tuple else list)
+ return process
+
+ischema_names['_array'] = ARRAY
diff --git a/app/lib/sqlalchemy/dialects/postgresql/base.py b/app/lib/sqlalchemy/dialects/postgresql/base.py
new file mode 100644
index 0000000..26d974e
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/base.py
@@ -0,0 +1,2989 @@
+# postgresql/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+.. dialect:: postgresql
+ :name: PostgreSQL
+
+.. _postgresql_sequences:
+
+Sequences/SERIAL
+----------------
+
+PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
+of creating new primary key values for integer-based primary key columns. When
+creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
+integer-based primary key columns, which generates a sequence and server side
+default corresponding to the column.
+
+To specify a specific named sequence to be used for primary key generation,
+use the :func:`~sqlalchemy.schema.Sequence` construct::
+
+ Table('sometable', metadata,
+ Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
+ )
+
+When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
+having the "last insert identifier" available, a RETURNING clause is added to
+the INSERT statement which specifies the primary key columns should be
+returned after the statement completes. The RETURNING functionality only takes
+place if PostgreSQL 8.2 or later is in use. As a fallback approach, the
+sequence, whether specified explicitly or implicitly via ``SERIAL``, is
+executed independently beforehand, with the returned value used in the
+subsequent insert. Note that when an
+:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
+"executemany" semantics, the "last inserted identifier" functionality does not
+apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
+case.
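+
+For example, a single-row INSERT returns its new primary key via RETURNING
+(a brief sketch; ``sometable`` and ``conn`` are assumed to exist)::
+
+    result = conn.execute(sometable.insert().values(name='foo'))
+    print(result.inserted_primary_key)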
+
+To disable the usage of RETURNING by default, specify the flag
+``implicit_returning=False`` to :func:`.create_engine`.
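+
+A minimal sketch (the connection URL is illustrative)::
+
+    engine = create_engine(
+        "postgresql://scott:tiger@localhost/test",
+        implicit_returning=False
+    )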
+
+.. _postgresql_isolation_level:
+
+Transaction Isolation Level
+---------------------------
+
+All PostgreSQL dialects support setting of transaction isolation level
+both via a dialect-specific parameter
+:paramref:`.create_engine.isolation_level` accepted by :func:`.create_engine`,
+as well as the :paramref:`.Connection.execution_options.isolation_level`
+argument as passed to :meth:`.Connection.execution_options`.
+When using a non-psycopg2 dialect, this feature works by issuing the command
+``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
+each new connection. For the special AUTOCOMMIT isolation level,
+DBAPI-specific techniques are used.
+
+To set isolation level using :func:`.create_engine`::
+
+ engine = create_engine(
+ "postgresql+pg8000://scott:tiger@localhost/test",
+ isolation_level="READ UNCOMMITTED"
+ )
+
+To set using per-connection execution options::
+
+ connection = engine.connect()
+ connection = connection.execution_options(
+ isolation_level="READ COMMITTED"
+ )
+
+Valid values for ``isolation_level`` include:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT`` - on psycopg2 / pg8000 only
+
+.. seealso::
+
+ :ref:`psycopg2_isolation_level`
+
+ :ref:`pg8000_isolation_level`
+
+.. _postgresql_schema_reflection:
+
+Remote-Schema Table Introspection and PostgreSQL search_path
+------------------------------------------------------------
+
+The PostgreSQL dialect can reflect tables from any schema. The
+:paramref:`.Table.schema` argument, or alternatively the
+:paramref:`.MetaData.reflect.schema` argument determines which schema will
+be searched for the table or tables. The reflected :class:`.Table` objects
+will in all cases retain this ``.schema`` attribute as was specified.
+However, with regard to tables which these :class:`.Table` objects refer to
+via foreign key constraint, a decision must be made as to how the ``.schema``
+is represented in those remote tables, in the case where that remote
+schema name is also a member of the current
+PostgreSQL ``search_path``.
+
+By default, the PostgreSQL dialect mimics the behavior encouraged by
+PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure. This function
+returns a sample definition for a particular foreign key constraint,
+omitting the referenced schema name from that definition when the name is
+also in the PostgreSQL schema search path. The interaction below
+illustrates this behavior::
+
+ test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
+ CREATE TABLE
+ test=> CREATE TABLE referring(
+ test(> id INTEGER PRIMARY KEY,
+ test(> referred_id INTEGER REFERENCES test_schema.referred(id));
+ CREATE TABLE
+ test=> SET search_path TO public, test_schema;
+ test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
+ test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+ test-> ON n.oid = c.relnamespace
+ test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
+ test-> WHERE c.relname='referring' AND r.contype = 'f'
+ test-> ;
+ pg_get_constraintdef
+ ---------------------------------------------------
+ FOREIGN KEY (referred_id) REFERENCES referred(id)
+ (1 row)
+
+Above, we created a table ``referred`` as a member of the remote schema
+``test_schema``, however when we added ``test_schema`` to the
+PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
+``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
+the function.
+
+On the other hand, if we set the search path back to the typical default
+of ``public``::
+
+ test=> SET search_path TO public;
+ SET
+
+The same query against ``pg_get_constraintdef()`` now returns the fully
+schema-qualified name for us::
+
+ test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
+ test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
+ test-> ON n.oid = c.relnamespace
+ test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
+ test-> WHERE c.relname='referring' AND r.contype = 'f';
+ pg_get_constraintdef
+ ---------------------------------------------------------------
+ FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id)
+ (1 row)
+
+SQLAlchemy will by default use the return value of ``pg_get_constraintdef()``
+in order to determine the remote schema name. That is, if our ``search_path``
+were set to include ``test_schema``, and we invoked a table
+reflection process as follows::
+
+ >>> from sqlalchemy import Table, MetaData, create_engine
+ >>> engine = create_engine("postgresql://scott:tiger@localhost/test")
+ >>> with engine.connect() as conn:
+ ... conn.execute("SET search_path TO test_schema, public")
+ ... meta = MetaData()
+ ... referring = Table('referring', meta,
+ ... autoload=True, autoload_with=conn)
+ ...
+
+
+The above process would deliver to the :attr:`.MetaData.tables` collection
+a ``referred`` table named **without** the schema::
+
+ >>> meta.tables['referred'].schema is None
+ True
+
+To alter the behavior of reflection such that the referred schema is
+maintained regardless of the ``search_path`` setting, use the
+``postgresql_ignore_search_path`` option, which can be specified as a
+dialect-specific argument to both :class:`.Table` as well as
+:meth:`.MetaData.reflect`::
+
+ >>> with engine.connect() as conn:
+ ... conn.execute("SET search_path TO test_schema, public")
+ ... meta = MetaData()
+ ... referring = Table('referring', meta, autoload=True,
+ ... autoload_with=conn,
+ ... postgresql_ignore_search_path=True)
+ ...
+
+
+We will now have ``test_schema.referred`` stored as schema-qualified::
+
+ >>> meta.tables['test_schema.referred'].schema
+ 'test_schema'
+
+.. sidebar:: Best Practices for PostgreSQL Schema reflection
+
+ The description of PostgreSQL schema reflection behavior is complex, and
+ is the product of many years of dealing with widely varied use cases and
+ user preferences. But in fact, there's no need to understand any of it if
+ you just stick to the simplest use pattern: leave the ``search_path`` set
+ to its default of ``public`` only, never refer to the name ``public`` as
+ an explicit schema name otherwise, and refer to all other schema names
+ explicitly when building up a :class:`.Table` object. The options
+ described here are only for those users who can't, or prefer not to, stay
+ within these guidelines.
+
+Note that **in all cases**, the "default" schema is always reflected as
+``None``. The "default" schema on PostgreSQL is that which is returned by the
+PostgreSQL ``current_schema()`` function. On a typical PostgreSQL
+installation, this is the name ``public``. So a table that refers to another
+which is in the ``public`` (i.e. default) schema will always have the
+``.schema`` attribute set to ``None``.
+
+.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
+ dialect-level option accepted by :class:`.Table` and
+ :meth:`.MetaData.reflect`.
+
+
+.. seealso::
+
+    The "Schema Search Path" section of the PostgreSQL documentation.
+
+INSERT/UPDATE...RETURNING
+-------------------------
+
+The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
+``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
+for single-row INSERT statements in order to fetch newly generated
+primary key identifiers. To specify an explicit ``RETURNING`` clause,
+use the :meth:`._UpdateBase.returning` method on a per-statement basis::
+
+ # INSERT..RETURNING
+ result = table.insert().returning(table.c.col1, table.c.col2).\
+ values(name='foo')
+    print(result.fetchall())
+
+ # UPDATE..RETURNING
+ result = table.update().returning(table.c.col1, table.c.col2).\
+ where(table.c.name=='foo').values(name='bar')
+    print(result.fetchall())
+
+ # DELETE..RETURNING
+ result = table.delete().returning(table.c.col1, table.c.col2).\
+ where(table.c.name=='foo')
+    print(result.fetchall())
+
+.. _postgresql_insert_on_conflict:
+
+INSERT...ON CONFLICT (Upsert)
+------------------------------
+
+Starting with version 9.5, PostgreSQL allows "upserts" (update or insert)
+of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT`` statement.
+A candidate row will only be inserted if that row does not violate
+any unique constraints. In the case of a unique constraint violation,
+a secondary action can occur which can be either "DO UPDATE", indicating
+that the data in the target row should be updated, or "DO NOTHING",
+which indicates to silently skip this row.
+
+Conflicts are determined using existing unique constraints and indexes. These
+constraints may be identified either using their name as stated in DDL,
+or they may be *inferred* by stating the columns and conditions that comprise
+the indexes.
+
+SQLAlchemy provides ``ON CONFLICT`` support via the PostgreSQL-specific
+:func:`.postgresql.dml.insert()` function, which provides
+the generative methods :meth:`~.postgresql.dml.Insert.on_conflict_do_update`
+and :meth:`~.postgresql.dml.Insert.on_conflict_do_nothing`::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ insert_stmt = insert(my_table).values(
+ id='some_existing_id',
+ data='inserted value')
+
+ do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
+ index_elements=['id']
+ )
+
+ conn.execute(do_nothing_stmt)
+
+ do_update_stmt = insert_stmt.on_conflict_do_update(
+ constraint='pk_my_table',
+ set_=dict(data='updated value')
+ )
+
+ conn.execute(do_update_stmt)
+
+Both methods supply the "target" of the conflict using either a
+named constraint or column inference:
+
+* The :paramref:`.Insert.on_conflict_do_update.index_elements` argument
+ specifies a sequence containing string column names, :class:`.Column` objects,
+ and/or SQL expression elements, which would identify a unique index::
+
+ do_update_stmt = insert_stmt.on_conflict_do_update(
+ index_elements=['id'],
+ set_=dict(data='updated value')
+ )
+
+ do_update_stmt = insert_stmt.on_conflict_do_update(
+ index_elements=[my_table.c.id],
+ set_=dict(data='updated value')
+ )
+
+* When using :paramref:`.Insert.on_conflict_do_update.index_elements` to
+  infer an index, a partial index can be inferred by also specifying the
+  :paramref:`.Insert.on_conflict_do_update.index_where` parameter::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
+ stmt = stmt.on_conflict_do_update(
+ index_elements=[my_table.c.user_email],
+ index_where=my_table.c.user_email.like('%@gmail.com'),
+ set_=dict(data=stmt.excluded.data)
+ )
+ conn.execute(stmt)
+
+
+* The :paramref:`.Insert.on_conflict_do_update.constraint` argument is
+ used to specify an index directly rather than inferring it. This can be
+ the name of a UNIQUE constraint, a PRIMARY KEY constraint, or an INDEX::
+
+ do_update_stmt = insert_stmt.on_conflict_do_update(
+ constraint='my_table_idx_1',
+ set_=dict(data='updated value')
+ )
+
+ do_update_stmt = insert_stmt.on_conflict_do_update(
+ constraint='my_table_pk',
+ set_=dict(data='updated value')
+ )
+
+* The :paramref:`.Insert.on_conflict_do_update.constraint` argument may
+ also refer to a SQLAlchemy construct representing a constraint,
+ e.g. :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`,
+ :class:`.Index`, or :class:`.ExcludeConstraint`. In this use,
+ if the constraint has a name, it is used directly. Otherwise, if the
+ constraint is unnamed, then inference will be used, where the expressions
+ and optional WHERE clause of the constraint will be spelled out in the
+ construct. This use is especially convenient
+ to refer to the named or unnamed primary key of a :class:`.Table` using the
+ :attr:`.Table.primary_key` attribute::
+
+ do_update_stmt = insert_stmt.on_conflict_do_update(
+ constraint=my_table.primary_key,
+ set_=dict(data='updated value')
+ )
+
+``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
+existing row, using any combination of new values as well as values
+from the proposed insertion. These values are specified using the
+:paramref:`.Insert.on_conflict_do_update.set_` parameter. This
+parameter accepts a dictionary which consists of direct values
+for UPDATE::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ stmt = insert(my_table).values(id='some_id', data='inserted value')
+ do_update_stmt = stmt.on_conflict_do_update(
+ index_elements=['id'],
+ set_=dict(data='updated value')
+ )
+ conn.execute(do_update_stmt)
+
+.. warning::
+
+ The :meth:`.Insert.on_conflict_do_update` method does **not** take into
+    account Python-side default UPDATE values or generation functions,
+    e.g. those specified using :paramref:`.Column.onupdate`.
+ These values will not be exercised for an ON CONFLICT style of UPDATE,
+ unless they are manually specified in the
+ :paramref:`.Insert.on_conflict_do_update.set_` dictionary.
+
+In order to refer to the proposed insertion row, the special alias
+:attr:`~.postgresql.dml.Insert.excluded` is available as an attribute on
+the :class:`.postgresql.dml.Insert` object; this object is a
+:class:`.ColumnCollection` alias that contains all columns of the target
+table::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ stmt = insert(my_table).values(
+ id='some_id',
+ data='inserted value',
+ author='jlh')
+ do_update_stmt = stmt.on_conflict_do_update(
+ index_elements=['id'],
+ set_=dict(data='updated value', author=stmt.excluded.author)
+ )
+ conn.execute(do_update_stmt)
+
+The :meth:`.Insert.on_conflict_do_update` method also accepts
+a WHERE clause using the :paramref:`.Insert.on_conflict_do_update.where`
+parameter, which will limit those rows which receive an UPDATE::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ stmt = insert(my_table).values(
+ id='some_id',
+ data='inserted value',
+ author='jlh')
+ on_update_stmt = stmt.on_conflict_do_update(
+ index_elements=['id'],
+        set_=dict(data='updated value', author=stmt.excluded.author),
+ where=(my_table.c.status == 2)
+ )
+ conn.execute(on_update_stmt)
+
+``ON CONFLICT`` may also be used to skip inserting a row entirely
+if any conflict with a unique or exclusion constraint occurs; below
+this is illustrated using the
+:meth:`~.postgresql.dml.Insert.on_conflict_do_nothing` method::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ stmt = insert(my_table).values(id='some_id', data='inserted value')
+ stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
+ conn.execute(stmt)
+
+If ``DO NOTHING`` is used without specifying any columns or constraint,
+it has the effect of skipping the INSERT for any unique or exclusion
+constraint violation which occurs::
+
+ from sqlalchemy.dialects.postgresql import insert
+
+ stmt = insert(my_table).values(id='some_id', data='inserted value')
+ stmt = stmt.on_conflict_do_nothing()
+ conn.execute(stmt)
+
+.. versionadded:: 1.1 Added support for PostgreSQL ON CONFLICT clauses
+
+.. seealso::
+
+    The ``INSERT .. ON CONFLICT`` section of the PostgreSQL documentation.
+
+.. _postgresql_match:
+
+Full Text Search
+----------------
+
+SQLAlchemy makes available the PostgreSQL ``@@`` operator via the
+:meth:`.ColumnElement.match` method on any textual column expression.
+On a PostgreSQL dialect, an expression like the following::
+
+ select([sometable.c.text.match("search string")])
+
+will emit to the database::
+
+ SELECT text @@ to_tsquery('search string') FROM table
+
+The PostgreSQL text search functions such as ``to_tsquery()``
+and ``to_tsvector()`` are available
+explicitly using the standard :data:`.func` construct. For example::
+
+ select([
+ func.to_tsvector('fat cats ate rats').match('cat & rat')
+ ])
+
+Emits the equivalent of::
+
+ SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')
+
+The :class:`.postgresql.TSVECTOR` type can provide for explicit CAST::
+
+ from sqlalchemy.dialects.postgresql import TSVECTOR
+ from sqlalchemy import select, cast
+ select([cast("some text", TSVECTOR)])
+
+produces a statement equivalent to::
+
+ SELECT CAST('some text' AS TSVECTOR) AS anon_1
+
+Full Text Searches in PostgreSQL are influenced by a combination of: the
+PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
+to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
+during a query.
+
+When performing a Full Text Search against a column that has a GIN or
+GiST index that is already pre-computed (which is common on full text
+searches), one may need to explicitly pass in a particular PostgreSQL
+``regconfig`` value to ensure the query-planner utilizes the index and does
+not re-compute the column on demand.
+
+In order to provide for this explicit query planning, or to use different
+search strategies, the ``match`` method accepts a ``postgresql_regconfig``
+keyword argument::
+
+ select([mytable.c.id]).where(
+ mytable.c.title.match('somestring', postgresql_regconfig='english')
+ )
+
+Emits the equivalent of::
+
+ SELECT mytable.id FROM mytable
+ WHERE mytable.title @@ to_tsquery('english', 'somestring')
+
+One can also specifically pass in a ``'regconfig'`` value to the
+``to_tsvector()`` command as the initial argument::
+
+ select([mytable.c.id]).where(
+ func.to_tsvector('english', mytable.c.title )\
+ .match('somestring', postgresql_regconfig='english')
+ )
+
+produces a statement equivalent to::
+
+ SELECT mytable.id FROM mytable
+ WHERE to_tsvector('english', mytable.title) @@
+ to_tsquery('english', 'somestring')
+
+It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
+PostgreSQL to ensure that you are generating queries with SQLAlchemy that
+take full advantage of any indexes you may have created for full text search.
+
+FROM ONLY ...
+------------------------
+
+The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
+table in an inheritance hierarchy. This can be used to produce the
+``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
+syntaxes. It uses SQLAlchemy's hints mechanism::
+
+ # SELECT ... FROM ONLY ...
+ result = table.select().with_hint(table, 'ONLY', 'postgresql')
+    print(result.fetchall())
+
+ # UPDATE ONLY ...
+ table.update(values=dict(foo='bar')).with_hint('ONLY',
+ dialect_name='postgresql')
+
+ # DELETE FROM ONLY ...
+ table.delete().with_hint('ONLY', dialect_name='postgresql')
+
+
+.. _postgresql_indexes:
+
+PostgreSQL-Specific Index Options
+---------------------------------
+
+Several extensions to the :class:`.Index` construct are available, specific
+to the PostgreSQL dialect.
+
+.. _postgresql_partial_indexes:
+
+Partial Indexes
+^^^^^^^^^^^^^^^^
+
+Partial indexes add criterion to the index definition so that the index is
+applied to a subset of rows. These can be specified on :class:`.Index`
+using the ``postgresql_where`` keyword argument::
+
+ Index('my_index', my_table.c.id, postgresql_where=my_table.c.value > 10)
+
+Operator Classes
+^^^^^^^^^^^^^^^^^
+
+PostgreSQL allows the specification of an *operator class* for each column of
+an index (see
+http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
+The :class:`.Index` construct allows these to be specified via the
+``postgresql_ops`` keyword argument::
+
+ Index('my_index', my_table.c.id, my_table.c.data,
+ postgresql_ops={
+ 'data': 'text_pattern_ops',
+ 'id': 'int4_ops'
+ })
+
+.. versionadded:: 0.7.2
+ ``postgresql_ops`` keyword argument to :class:`.Index` construct.
+
+Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
+the :class:`.Column`, i.e. the name used to access it from the ``.c``
+collection of :class:`.Table`, which can be configured to be different than
+the actual name of the column as expressed in the database.
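+
+For example, a brief sketch where the column's ``key`` differs from its
+database name (all names are illustrative)::
+
+    my_table = Table('my_table', metadata,
+        Column('data_col', String, key='data')
+    )
+
+    Index('my_index', my_table.c.data,
+          postgresql_ops={'data': 'text_pattern_ops'})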
+
+Index Types
+^^^^^^^^^^^^
+
+PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
+as the ability for users to create their own (see
+http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be
+specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
+
+ Index('my_index', my_table.c.data, postgresql_using='gin')
+
+The value passed to the keyword argument will be simply passed through to the
+underlying CREATE INDEX command, so it *must* be a valid index type for your
+version of PostgreSQL.
+
+.. _postgresql_index_storage:
+
+Index Storage Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+PostgreSQL allows storage parameters to be set on indexes. The storage
+parameters available depend on the index method used by the index. Storage
+parameters can be specified on :class:`.Index` using the ``postgresql_with``
+keyword argument::
+
+ Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50})
+
+.. versionadded:: 1.0.6
+
+PostgreSQL allows defining the tablespace in which to create the index.
+The tablespace can be specified on :class:`.Index` using the
+``postgresql_tablespace`` keyword argument::
+
+ Index('my_index', my_table.c.data, postgresql_tablespace='my_tablespace')
+
+.. versionadded:: 1.1
+
+Note that the same option is available on :class:`.Table` as well.
+
+.. _postgresql_index_concurrently:
+
+Indexes with CONCURRENTLY
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The PostgreSQL index option CONCURRENTLY is supported by passing the
+flag ``postgresql_concurrently`` to the :class:`.Index` construct::
+
+ tbl = Table('testtbl', m, Column('data', Integer))
+
+ idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
+
+The above index construct will render DDL for CREATE INDEX, assuming
+PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as::
+
+ CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
+
+For DROP INDEX, assuming PostgreSQL 9.2 or higher is detected or for
+a connection-less dialect, it will emit::
+
+ DROP INDEX CONCURRENTLY test_idx1
+
+.. versionadded:: 1.1 support for CONCURRENTLY on DROP INDEX. The
+ CONCURRENTLY keyword is now only emitted if a high enough version
+ of PostgreSQL is detected on the connection (or for a connection-less
+ dialect).
+
+When using CONCURRENTLY, the PostgreSQL database requires that the statement
+be invoked outside of a transaction block. The Python DBAPI enforces that
+even for a single statement, a transaction is present, so to use this
+construct, the DBAPI's "autocommit" mode must be used::
+
+ metadata = MetaData()
+ table = Table(
+ "foo", metadata,
+ Column("id", String))
+ index = Index(
+ "foo_idx", table.c.id, postgresql_concurrently=True)
+
+ with engine.connect() as conn:
+ with conn.execution_options(isolation_level='AUTOCOMMIT'):
+ table.create(conn)
+
+.. seealso::
+
+ :ref:`postgresql_isolation_level`
+
+.. _postgresql_index_reflection:
+
+PostgreSQL Index Reflection
+---------------------------
+
+The PostgreSQL database creates a UNIQUE INDEX implicitly whenever the
+UNIQUE CONSTRAINT construct is used. When inspecting a table using
+:class:`.Inspector`, the :meth:`.Inspector.get_indexes`
+and :meth:`.Inspector.get_unique_constraints` methods will report on these
+two constructs distinctly; in the case of the index, the key
+``duplicates_constraint`` will be present in the index entry if it is
+detected as mirroring a constraint. When performing reflection using
+``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned
+in :attr:`.Table.indexes` when it is detected as mirroring a
+:class:`.UniqueConstraint` in the :attr:`.Table.constraints` collection.
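+
+For example, a brief sketch that uses the inspector to spot such mirrored
+indexes (the table name is illustrative)::
+
+    from sqlalchemy import create_engine, inspect
+
+    engine = create_engine("postgresql+psycopg2://localhost/test")
+    insp = inspect(engine)
+    for idx in insp.get_indexes('some_table'):
+        if 'duplicates_constraint' in idx:
+            print(idx['name'], "mirrors", idx['duplicates_constraint'])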
+
+.. versionchanged:: 1.0.0 - :class:`.Table` reflection now includes
+ :class:`.UniqueConstraint` objects present in the :attr:`.Table.constraints`
+ collection; the PostgreSQL backend will no longer include a "mirrored"
+ :class:`.Index` construct in :attr:`.Table.indexes` if it is detected
+ as corresponding to a unique constraint.
+
+Special Reflection Options
+--------------------------
+
+The :class:`.Inspector` used for the PostgreSQL backend is an instance
+of :class:`.PGInspector`, which offers additional methods::
+
+ from sqlalchemy import create_engine, inspect
+
+ engine = create_engine("postgresql+psycopg2://localhost/test")
+ insp = inspect(engine) # will be a PGInspector
+
+ print(insp.get_enums())
+
+.. autoclass:: PGInspector
+ :members:
+
+.. _postgresql_table_options:
+
+PostgreSQL Table Options
+-------------------------
+
+Several options for CREATE TABLE are supported directly by the PostgreSQL
+dialect in conjunction with the :class:`.Table` construct:
+
+* ``TABLESPACE``::
+
+ Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
+
+ The above option is also available on the :class:`.Index` construct.
+
+* ``ON COMMIT``::
+
+ Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
+
+* ``WITH OIDS``::
+
+ Table("some_table", metadata, ..., postgresql_with_oids=True)
+
+* ``WITHOUT OIDS``::
+
+ Table("some_table", metadata, ..., postgresql_with_oids=False)
+
+* ``INHERITS``::
+
+ Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
+
+ Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
+
+.. versionadded:: 1.0.0
+
+.. seealso::
+
+ `PostgreSQL CREATE TABLE options
+ `_
+
+ARRAY Types
+-----------
+
+The PostgreSQL dialect supports arrays, both as multidimensional column types
+as well as array literals:
+
+* :class:`.postgresql.ARRAY` - ARRAY datatype
+
+* :class:`.postgresql.array` - array literal
+
+* :func:`.postgresql.array_agg` - ARRAY_AGG SQL function
+
+* :class:`.postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate
+ function syntax.
+
+JSON Types
+----------
+
+The PostgreSQL dialect supports both JSON and JSONB datatypes, including
+psycopg2's native support and support for all of PostgreSQL's special
+operators:
+
+* :class:`.postgresql.JSON`
+
+* :class:`.postgresql.JSONB`
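+
+A brief sketch of a JSONB column with a containment filter (table and
+column names are illustrative)::
+
+    from sqlalchemy.dialects.postgresql import JSONB
+
+    data_table = Table('data_table', metadata,
+        Column('id', Integer, primary_key=True),
+        Column('data', JSONB)
+    )
+
+    select([data_table]).where(
+        data_table.c.data.contains({"key": "value"})
+    )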
+
+HSTORE Type
+-----------
+
+The PostgreSQL HSTORE type as well as hstore literals are supported:
+
+* :class:`.postgresql.HSTORE` - HSTORE datatype
+
+* :class:`.postgresql.hstore` - hstore literal
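+
+A brief sketch (table and column names are illustrative)::
+
+    from sqlalchemy.dialects.postgresql import HSTORE
+
+    mytable = Table('mytable', metadata,
+        Column('id', Integer, primary_key=True),
+        Column('tags', HSTORE)
+    )
+
+    # indexed access renders the PostgreSQL -> operator
+    select([mytable.c.tags['sometag']])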
+
+ENUM Types
+----------
+
+PostgreSQL has an independently creatable TYPE structure which is used
+to implement an enumerated type. This approach introduces significant
+complexity on the SQLAlchemy side in terms of when this type should be
+CREATED and DROPPED. The type object is also an independently reflectable
+entity. The following sections should be consulted:
+
+* :class:`.postgresql.ENUM` - DDL and typing support for ENUM.
+
+* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types
+
+* :meth:`.postgresql.ENUM.create` , :meth:`.postgresql.ENUM.drop` - individual
+ CREATE and DROP commands for ENUM.
+
+.. _postgresql_array_of_enum:
+
+Using ENUM with ARRAY
+^^^^^^^^^^^^^^^^^^^^^
+
+The combination of ENUM and ARRAY is not directly supported by backend
+DBAPIs at this time. In order to send and receive an ARRAY of ENUM,
+use the following workaround type::
+
+    import re
+
+    import sqlalchemy as sa
+    from sqlalchemy.dialects.postgresql import ARRAY
+
+    class ArrayOfEnum(ARRAY):
+
+ def bind_expression(self, bindvalue):
+ return sa.cast(bindvalue, self)
+
+ def result_processor(self, dialect, coltype):
+ super_rp = super(ArrayOfEnum, self).result_processor(
+ dialect, coltype)
+
+ def handle_raw_string(value):
+ inner = re.match(r"^{(.*)}$", value).group(1)
+ return inner.split(",") if inner else []
+
+ def process(value):
+ if value is None:
+ return None
+ return super_rp(handle_raw_string(value))
+ return process
+
+E.g.::
+
+ Table(
+ 'mydata', metadata,
+ Column('id', Integer, primary_key=True),
+        Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
+
+ )
+
+This type is not included as a built-in type as it would be incompatible
+with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
+a new version.
+
+.. _postgresql_array_of_json:
+
+Using JSON/JSONB with ARRAY
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Similar to using ENUM, for an ARRAY of JSON/JSONB we need to render the
+appropriate CAST, however current psycopg2 drivers seem to handle the result
+for ARRAY of JSON automatically, so the type is simpler::
+
+
+    import sqlalchemy as sa
+    from sqlalchemy.dialects.postgresql import ARRAY
+
+    class CastingArray(ARRAY):
+ def bind_expression(self, bindvalue):
+ return sa.cast(bindvalue, self)
+
+E.g.::
+
+ Table(
+ 'mydata', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', CastingArray(JSONB))
+ )
+
+
+"""
+from collections import defaultdict
+import re
+import datetime as dt
+
+
+from sqlalchemy.sql import elements
+from ... import sql, schema, exc, util
+from ...engine import default, reflection
+from ...sql import compiler, expression
+from ... import types as sqltypes
+
+try:
+ from uuid import UUID as _python_UUID
+except ImportError:
+ _python_UUID = None
+
+from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
+ CHAR, TEXT, FLOAT, NUMERIC, \
+ DATE, BOOLEAN, REAL
+
+AUTOCOMMIT_REGEXP = re.compile(
+ r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|'
+ 'IMPORT FOREIGN SCHEMA|REFRESH MATERIALIZED VIEW)',
+ re.I | re.UNICODE)
+
+RESERVED_WORDS = set(
+ ["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
+ "asymmetric", "both", "case", "cast", "check", "collate", "column",
+ "constraint", "create", "current_catalog", "current_date",
+ "current_role", "current_time", "current_timestamp", "current_user",
+ "default", "deferrable", "desc", "distinct", "do", "else", "end",
+ "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
+ "having", "in", "initially", "intersect", "into", "leading", "limit",
+ "localtime", "localtimestamp", "new", "not", "null", "of", "off",
+ "offset", "old", "on", "only", "or", "order", "placing", "primary",
+ "references", "returning", "select", "session_user", "some", "symmetric",
+ "table", "then", "to", "trailing", "true", "union", "unique", "user",
+ "using", "variadic", "when", "where", "window", "with", "authorization",
+ "between", "binary", "cross", "current_schema", "freeze", "full",
+ "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
+ "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
+ ])
+
+_DECIMAL_TYPES = (1231, 1700)
+_FLOAT_TYPES = (700, 701, 1021, 1022)
+_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
+
+class BYTEA(sqltypes.LargeBinary):
+ __visit_name__ = 'BYTEA'
+
+
+class DOUBLE_PRECISION(sqltypes.Float):
+ __visit_name__ = 'DOUBLE_PRECISION'
+
+
+class INET(sqltypes.TypeEngine):
+ __visit_name__ = "INET"
+PGInet = INET
+
+
+class CIDR(sqltypes.TypeEngine):
+ __visit_name__ = "CIDR"
+PGCidr = CIDR
+
+
+class MACADDR(sqltypes.TypeEngine):
+ __visit_name__ = "MACADDR"
+PGMacAddr = MACADDR
+
+
+class OID(sqltypes.TypeEngine):
+
+ """Provide the PostgreSQL OID type.
+
+ .. versionadded:: 0.9.5
+
+ """
+ __visit_name__ = "OID"
+
+
+class TIMESTAMP(sqltypes.TIMESTAMP):
+
+ def __init__(self, timezone=False, precision=None):
+ super(TIMESTAMP, self).__init__(timezone=timezone)
+ self.precision = precision
+
+
+class TIME(sqltypes.TIME):
+
+ def __init__(self, timezone=False, precision=None):
+ super(TIME, self).__init__(timezone=timezone)
+ self.precision = precision
+
+
+class INTERVAL(sqltypes.TypeEngine):
+
+ """PostgreSQL INTERVAL type.
+
+ The INTERVAL type may not be supported on all DBAPIs.
+ It is known to work on psycopg2 and not pg8000 or zxjdbc.
+
+ """
+ __visit_name__ = 'INTERVAL'
+
+ def __init__(self, precision=None):
+ self.precision = precision
+
+ @classmethod
+ def _adapt_from_generic_interval(cls, interval):
+ return INTERVAL(precision=interval.second_precision)
+
+ @property
+ def _type_affinity(self):
+ return sqltypes.Interval
+
+ @property
+ def python_type(self):
+ return dt.timedelta
+
+PGInterval = INTERVAL
+
+
+class BIT(sqltypes.TypeEngine):
+ __visit_name__ = 'BIT'
+
+ def __init__(self, length=None, varying=False):
+ if not varying:
+ # BIT without VARYING defaults to length 1
+ self.length = length or 1
+ else:
+ # but BIT VARYING can be unlimited-length, so no default
+ self.length = length
+ self.varying = varying
+
+PGBit = BIT
+
+
+class UUID(sqltypes.TypeEngine):
+
+ """PostgreSQL UUID type.
+
+ Represents the UUID column type, interpreting
+ data either as natively returned by the DBAPI
+ or as Python uuid objects.
+
+ The UUID type may not be supported on all DBAPIs.
+ It is known to work on psycopg2 and not pg8000.
+
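+    A brief usage sketch (the column name is illustrative)::
+
+        from sqlalchemy.dialects.postgresql import UUID
+
+        Column('uid', UUID(as_uuid=True))
+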
+ """
+ __visit_name__ = 'UUID'
+
+ def __init__(self, as_uuid=False):
+ """Construct a UUID type.
+
+
+ :param as_uuid=False: if True, values will be interpreted
+ as Python uuid objects, converting to/from string via the
+ DBAPI.
+
+ """
+ if as_uuid and _python_UUID is None:
+ raise NotImplementedError(
+ "This version of Python does not support "
+ "the native UUID type."
+ )
+ self.as_uuid = as_uuid
+
+ def bind_processor(self, dialect):
+ if self.as_uuid:
+ def process(value):
+ if value is not None:
+ value = util.text_type(value)
+ return value
+ return process
+ else:
+ return None
+
+ def result_processor(self, dialect, coltype):
+ if self.as_uuid:
+ def process(value):
+ if value is not None:
+ value = _python_UUID(value)
+ return value
+ return process
+ else:
+ return None
+
+PGUuid = UUID
+
+
+class TSVECTOR(sqltypes.TypeEngine):
+
+ """The :class:`.postgresql.TSVECTOR` type implements the PostgreSQL
+ text search type TSVECTOR.
+
+ It can be used to do full text queries on natural language
+ documents.
+
+ .. versionadded:: 0.9.0
+
+ .. seealso::
+
+ :ref:`postgresql_match`
+
+ """
+ __visit_name__ = 'TSVECTOR'
+
+
+class ENUM(sqltypes.Enum):
+
+ """PostgreSQL ENUM type.
+
+ This is a subclass of :class:`.types.Enum` which includes
+ support for PG's ``CREATE TYPE`` and ``DROP TYPE``.
+
+ When the builtin type :class:`.types.Enum` is used and the
+ :paramref:`.Enum.native_enum` flag is left at its default of
+ True, the PostgreSQL backend will use a :class:`.postgresql.ENUM`
+ type as the implementation, so the special create/drop rules
+ will be used.
+
+ The create/drop behavior of ENUM is necessarily intricate, due to the
+    awkward relationship the ENUM type has to the
+ parent table, in that it may be "owned" by just a single table, or
+ may be shared among many tables.
+
+ When using :class:`.types.Enum` or :class:`.postgresql.ENUM`
+ in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted
+ corresponding to when the :meth:`.Table.create` and :meth:`.Table.drop`
+ methods are called::
+
+ table = Table('sometable', metadata,
+ Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
+ )
+
+ table.create(engine) # will emit CREATE ENUM and CREATE TABLE
+ table.drop(engine) # will emit DROP TABLE and DROP ENUM
+
+ To use a common enumerated type between multiple tables, the best
+ practice is to declare the :class:`.types.Enum` or
+ :class:`.postgresql.ENUM` independently, and associate it with the
+ :class:`.MetaData` object itself::
+
+ my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
+
+ t1 = Table('sometable_one', metadata,
+            Column('some_enum', my_enum)
+ )
+
+ t2 = Table('sometable_two', metadata,
+            Column('some_enum', my_enum)
+ )
+
+ When this pattern is used, care must still be taken at the level
+ of individual table creates. Emitting CREATE TABLE without also
+ specifying ``checkfirst=True`` will still cause issues::
+
+ t1.create(engine) # will fail: no such type 'myenum'
+
+ If we specify ``checkfirst=True``, the individual table-level create
+ operation will check for the ``ENUM`` and create if not exists::
+
+ # will check if enum exists, and emit CREATE TYPE if not
+ t1.create(engine, checkfirst=True)
+
+    When using a metadata-level ENUM type, the type will always be created
+    and dropped whenever the metadata-wide create/drop is called::
+
+ metadata.create_all(engine) # will emit CREATE TYPE
+ metadata.drop_all(engine) # will emit DROP TYPE
+
+ The type can also be created and dropped directly::
+
+ my_enum.create(engine)
+ my_enum.drop(engine)
+
+ .. versionchanged:: 1.0.0 The PostgreSQL :class:`.postgresql.ENUM` type
+ now behaves more strictly with regards to CREATE/DROP. A metadata-level
+ ENUM type will only be created and dropped at the metadata level,
+ not the table level, with the exception of
+ ``table.create(checkfirst=True)``.
+ The ``table.drop()`` call will now emit a DROP TYPE for a table-level
+ enumerated type.
+
+ """
+
+ def __init__(self, *enums, **kw):
+ """Construct an :class:`~.postgresql.ENUM`.
+
+ Arguments are the same as that of
+ :class:`.types.Enum`, but also including
+ the following parameters.
+
+ :param create_type: Defaults to True.
+ Indicates that ``CREATE TYPE`` should be
+ emitted, after optionally checking for the
+ presence of the type, when the parent
+ table is being created; and additionally
+ that ``DROP TYPE`` is called when the table
+ is dropped. When ``False``, no check
+ will be performed and no ``CREATE TYPE``
+ or ``DROP TYPE`` is emitted, unless
+ :meth:`~.postgresql.ENUM.create`
+ or :meth:`~.postgresql.ENUM.drop`
+ are called directly.
+ Setting to ``False`` is helpful
+ when invoking a creation scheme to a SQL file
+ without access to the actual database -
+ the :meth:`~.postgresql.ENUM.create` and
+ :meth:`~.postgresql.ENUM.drop` methods can
+ be used to emit SQL to a target bind.
+
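+          A brief sketch of this pattern (``engine`` is assumed to be
+          an existing :class:`.Engine`)::
+
+              enum_type = ENUM('a', 'b', 'c', name='myenum',
+                               create_type=False)
+              enum_type.create(engine)  # explicitly emit CREATE TYPE
+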
+ .. versionadded:: 0.7.4
+
+ """
+ self.create_type = kw.pop("create_type", True)
+ super(ENUM, self).__init__(*enums, **kw)
+
+ def create(self, bind=None, checkfirst=True):
+ """Emit ``CREATE TYPE`` for this
+ :class:`~.postgresql.ENUM`.
+
+ If the underlying dialect does not support
+ PostgreSQL CREATE TYPE, no action is taken.
+
+ :param bind: a connectable :class:`.Engine`,
+ :class:`.Connection`, or similar object to emit
+ SQL.
+ :param checkfirst: if ``True``, a query against
+        the PG catalog will first be performed to see
+        whether the type already exists before
+ creating.
+
+ """
+ if not bind.dialect.supports_native_enum:
+ return
+
+ if not checkfirst or \
+ not bind.dialect.has_type(
+ bind, self.name, schema=self.schema):
+ bind.execute(CreateEnumType(self))
+
+ def drop(self, bind=None, checkfirst=True):
+ """Emit ``DROP TYPE`` for this
+ :class:`~.postgresql.ENUM`.
+
+ If the underlying dialect does not support
+ PostgreSQL DROP TYPE, no action is taken.
+
+ :param bind: a connectable :class:`.Engine`,
+ :class:`.Connection`, or similar object to emit
+ SQL.
+ :param checkfirst: if ``True``, a query against
+        the PG catalog will first be performed to see
+ if the type actually exists before dropping.
+
+ """
+ if not bind.dialect.supports_native_enum:
+ return
+
+ if not checkfirst or \
+ bind.dialect.has_type(bind, self.name, schema=self.schema):
+ bind.execute(DropEnumType(self))
+
+ def _check_for_name_in_memos(self, checkfirst, kw):
+ """Look in the 'ddl runner' for 'memos', then
+ note our name in that collection.
+
+        This is to ensure that a particular named enum is operated
+ upon only once within any kind of create/drop
+ sequence without relying upon "checkfirst".
+
+ """
+ if not self.create_type:
+ return True
+ if '_ddl_runner' in kw:
+ ddl_runner = kw['_ddl_runner']
+ if '_pg_enums' in ddl_runner.memo:
+ pg_enums = ddl_runner.memo['_pg_enums']
+ else:
+ pg_enums = ddl_runner.memo['_pg_enums'] = set()
+ present = self.name in pg_enums
+ pg_enums.add(self.name)
+ return present
+ else:
+ return False
+
+ def _on_table_create(self, target, bind, checkfirst=False, **kw):
+ if checkfirst or (
+ not self.metadata and
+ not kw.get('_is_metadata_operation', False)) and \
+ not self._check_for_name_in_memos(checkfirst, kw):
+ self.create(bind=bind, checkfirst=checkfirst)
+
+ def _on_table_drop(self, target, bind, checkfirst=False, **kw):
+ if not self.metadata and \
+ not kw.get('_is_metadata_operation', False) and \
+ not self._check_for_name_in_memos(checkfirst, kw):
+ self.drop(bind=bind, checkfirst=checkfirst)
+
+ def _on_metadata_create(self, target, bind, checkfirst=False, **kw):
+ if not self._check_for_name_in_memos(checkfirst, kw):
+ self.create(bind=bind, checkfirst=checkfirst)
+
+ def _on_metadata_drop(self, target, bind, checkfirst=False, **kw):
+ if not self._check_for_name_in_memos(checkfirst, kw):
+ self.drop(bind=bind, checkfirst=checkfirst)
+
+colspecs = {
+ sqltypes.Interval: INTERVAL,
+ sqltypes.Enum: ENUM,
+}
+
+ischema_names = {
+ 'integer': INTEGER,
+ 'bigint': BIGINT,
+ 'smallint': SMALLINT,
+ 'character varying': VARCHAR,
+ 'character': CHAR,
+ '"char"': sqltypes.String,
+ 'name': sqltypes.String,
+ 'text': TEXT,
+ 'numeric': NUMERIC,
+ 'float': FLOAT,
+ 'real': REAL,
+ 'inet': INET,
+ 'cidr': CIDR,
+ 'uuid': UUID,
+ 'bit': BIT,
+ 'bit varying': BIT,
+ 'macaddr': MACADDR,
+ 'oid': OID,
+ 'double precision': DOUBLE_PRECISION,
+ 'timestamp': TIMESTAMP,
+ 'timestamp with time zone': TIMESTAMP,
+ 'timestamp without time zone': TIMESTAMP,
+ 'time with time zone': TIME,
+ 'time without time zone': TIME,
+ 'date': DATE,
+ 'time': TIME,
+ 'bytea': BYTEA,
+ 'boolean': BOOLEAN,
+ 'interval': INTERVAL,
+ 'interval year to month': INTERVAL,
+ 'interval day to second': INTERVAL,
+ 'tsvector': TSVECTOR
+}
+
+
+class PGCompiler(compiler.SQLCompiler):
+
+ def visit_array(self, element, **kw):
+ return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
+
+ def visit_slice(self, element, **kw):
+ return "%s:%s" % (
+ self.process(element.start, **kw),
+ self.process(element.stop, **kw),
+ )
+
+ def visit_json_getitem_op_binary(self, binary, operator, **kw):
+ kw['eager_grouping'] = True
+ return self._generate_generic_binary(
+ binary, " -> ", **kw
+ )
+
+ def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+ kw['eager_grouping'] = True
+ return self._generate_generic_binary(
+ binary, " #> ", **kw
+ )
+
+ def visit_getitem_binary(self, binary, operator, **kw):
+ return "%s[%s]" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
+
+ def visit_aggregate_order_by(self, element, **kw):
+ return "%s ORDER BY %s" % (
+ self.process(element.target, **kw),
+ self.process(element.order_by, **kw)
+ )
+
+ def visit_match_op_binary(self, binary, operator, **kw):
+ if "postgresql_regconfig" in binary.modifiers:
+ regconfig = self.render_literal_value(
+ binary.modifiers['postgresql_regconfig'],
+ sqltypes.STRINGTYPE)
+ if regconfig:
+ return "%s @@ to_tsquery(%s, %s)" % (
+ self.process(binary.left, **kw),
+ regconfig,
+ self.process(binary.right, **kw)
+ )
+ return "%s @@ to_tsquery(%s)" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw)
+ )
+
+ def visit_ilike_op_binary(self, binary, operator, **kw):
+ escape = binary.modifiers.get("escape", None)
+
+ return '%s ILIKE %s' % \
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw)) \
+ + (
+ ' ESCAPE ' +
+ self.render_literal_value(escape, sqltypes.STRINGTYPE)
+ if escape else ''
+ )
+
+ def visit_notilike_op_binary(self, binary, operator, **kw):
+ escape = binary.modifiers.get("escape", None)
+ return '%s NOT ILIKE %s' % \
+ (self.process(binary.left, **kw),
+ self.process(binary.right, **kw)) \
+ + (
+ ' ESCAPE ' +
+ self.render_literal_value(escape, sqltypes.STRINGTYPE)
+ if escape else ''
+ )
+
+ def render_literal_value(self, value, type_):
+ value = super(PGCompiler, self).render_literal_value(value, type_)
+
+ if self.dialect._backslash_escapes:
+ value = value.replace('\\', '\\\\')
+ return value
+
+ def visit_sequence(self, seq):
+ return "nextval('%s')" % self.preparer.format_sequence(seq)
+
+ def limit_clause(self, select, **kw):
+ text = ""
+ if select._limit_clause is not None:
+ text += " \n LIMIT " + self.process(select._limit_clause, **kw)
+ if select._offset_clause is not None:
+ if select._limit_clause is None:
+ text += " \n LIMIT ALL"
+ text += " OFFSET " + self.process(select._offset_clause, **kw)
+ return text
+
+ def format_from_hint_text(self, sqltext, table, hint, iscrud):
+ if hint.upper() != 'ONLY':
+ raise exc.CompileError("Unrecognized hint: %r" % hint)
+ return "ONLY " + sqltext
+
+ def get_select_precolumns(self, select, **kw):
+ if select._distinct is not False:
+ if select._distinct is True:
+ return "DISTINCT "
+ elif isinstance(select._distinct, (list, tuple)):
+ return "DISTINCT ON (" + ', '.join(
+ [self.process(col) for col in select._distinct]
+ ) + ") "
+ else:
+ return "DISTINCT ON (" + \
+ self.process(select._distinct, **kw) + ") "
+ else:
+ return ""
+
+ def for_update_clause(self, select, **kw):
+
+ if select._for_update_arg.read:
+ if select._for_update_arg.key_share:
+ tmp = " FOR KEY SHARE"
+ else:
+ tmp = " FOR SHARE"
+ elif select._for_update_arg.key_share:
+ tmp = " FOR NO KEY UPDATE"
+ else:
+ tmp = " FOR UPDATE"
+
+ if select._for_update_arg.of:
+ tables = util.OrderedSet(
+ c.table if isinstance(c, expression.ColumnClause)
+ else c for c in select._for_update_arg.of)
+ tmp += " OF " + ", ".join(
+ self.process(table, ashint=True, use_schema=False, **kw)
+ for table in tables
+ )
+
+ if select._for_update_arg.nowait:
+ tmp += " NOWAIT"
+ if select._for_update_arg.skip_locked:
+ tmp += " SKIP LOCKED"
+
+ return tmp
+
+ def returning_clause(self, stmt, returning_cols):
+
+ columns = [
+ self._label_select_column(None, c, True, False, {})
+ for c in expression._select_iterables(returning_cols)
+ ]
+
+ return 'RETURNING ' + ', '.join(columns)
+
+ def visit_substring_func(self, func, **kw):
+ s = self.process(func.clauses.clauses[0], **kw)
+ start = self.process(func.clauses.clauses[1], **kw)
+ if len(func.clauses.clauses) > 2:
+ length = self.process(func.clauses.clauses[2], **kw)
+ return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
+ else:
+ return "SUBSTRING(%s FROM %s)" % (s, start)
+
+ def _on_conflict_target(self, clause, **kw):
+
+ if clause.constraint_target is not None:
+ target_text = 'ON CONSTRAINT %s' % clause.constraint_target
+ elif clause.inferred_target_elements is not None:
+ target_text = '(%s)' % ', '.join(
+ (self.preparer.quote(c)
+ if isinstance(c, util.string_types)
+ else
+ self.process(c, include_table=False, use_schema=False))
+ for c in clause.inferred_target_elements
+ )
+ if clause.inferred_target_whereclause is not None:
+ target_text += ' WHERE %s' % \
+ self.process(
+ clause.inferred_target_whereclause,
+ include_table=False,
+ use_schema=False
+ )
+ else:
+ target_text = ''
+
+ return target_text
+
+ def visit_on_conflict_do_nothing(self, on_conflict, **kw):
+
+ target_text = self._on_conflict_target(on_conflict, **kw)
+
+ if target_text:
+ return "ON CONFLICT %s DO NOTHING" % target_text
+ else:
+ return "ON CONFLICT DO NOTHING"
+
+ def visit_on_conflict_do_update(self, on_conflict, **kw):
+
+ clause = on_conflict
+
+ target_text = self._on_conflict_target(on_conflict, **kw)
+
+ action_set_ops = []
+
+ set_parameters = dict(clause.update_values_to_set)
+ # create a list of column assignment clauses as tuples
+ cols = self.statement.table.c
+ for c in cols:
+ col_key = c.key
+ if col_key in set_parameters:
+ value = set_parameters.pop(col_key)
+ if elements._is_literal(value):
+ value = elements.BindParameter(
+ None, value, type_=c.type
+ )
+
+ else:
+ if isinstance(value, elements.BindParameter) and \
+ value.type._isnull:
+ value = value._clone()
+ value.type = c.type
+ value_text = self.process(value.self_group(), use_schema=False)
+
+ key_text = (
+ self.preparer.quote(col_key)
+ )
+ action_set_ops.append('%s = %s' % (key_text, value_text))
+
+ # check for names that don't match columns
+ if set_parameters:
+ util.warn(
+ "Additional column names not matching "
+ "any column keys in table '%s': %s" % (
+ self.statement.table.name,
+ (", ".join("'%s'" % c for c in set_parameters))
+ )
+ )
+ for k, v in set_parameters.items():
+ key_text = (
+ self.preparer.quote(k)
+ if isinstance(k, util.string_types)
+ else self.process(k, use_schema=False)
+ )
+ value_text = self.process(
+ elements._literal_as_binds(v),
+ use_schema=False
+ )
+ action_set_ops.append('%s = %s' % (key_text, value_text))
+
+ action_text = ', '.join(action_set_ops)
+ if clause.update_whereclause is not None:
+ action_text += ' WHERE %s' % \
+ self.process(
+ clause.update_whereclause,
+ include_table=True,
+ use_schema=False
+ )
+
+ return 'ON CONFLICT %s DO UPDATE SET %s' % (target_text, action_text)
+
+
+class PGDDLCompiler(compiler.DDLCompiler):
+
+ def get_column_specification(self, column, **kwargs):
+
+ colspec = self.preparer.format_column(column)
+ impl_type = column.type.dialect_impl(self.dialect)
+ if isinstance(impl_type, sqltypes.TypeDecorator):
+ impl_type = impl_type.impl
+
+ if column.primary_key and \
+ column is column.table._autoincrement_column and \
+ (
+ self.dialect.supports_smallserial or
+ not isinstance(impl_type, sqltypes.SmallInteger)
+ ) and (
+ column.default is None or
+ (
+ isinstance(column.default, schema.Sequence) and
+ column.default.optional
+ )):
+ if isinstance(impl_type, sqltypes.BigInteger):
+ colspec += " BIGSERIAL"
+ elif isinstance(impl_type, sqltypes.SmallInteger):
+ colspec += " SMALLSERIAL"
+ else:
+ colspec += " SERIAL"
+ else:
+ colspec += " " + self.dialect.type_compiler.process(
+ column.type, type_expression=column)
+ default = self.get_column_default_string(column)
+ if default is not None:
+ colspec += " DEFAULT " + default
+
+ if not column.nullable:
+ colspec += " NOT NULL"
+ return colspec
+
+ def visit_create_enum_type(self, create):
+ type_ = create.element
+
+ return "CREATE TYPE %s AS ENUM (%s)" % (
+ self.preparer.format_type(type_),
+ ", ".join(
+ self.sql_compiler.process(sql.literal(e), literal_binds=True)
+ for e in type_.enums)
+ )
+
+ def visit_drop_enum_type(self, drop):
+ type_ = drop.element
+
+ return "DROP TYPE %s" % (
+ self.preparer.format_type(type_)
+ )
+
+ def visit_create_index(self, create):
+ preparer = self.preparer
+ index = create.element
+ self._verify_index_table(index)
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+ text += "INDEX "
+
+ if self.dialect._supports_create_index_concurrently:
+ concurrently = index.dialect_options['postgresql']['concurrently']
+ if concurrently:
+ text += "CONCURRENTLY "
+
+ text += "%s ON %s " % (
+ self._prepared_index_name(index,
+ include_schema=False),
+ preparer.format_table(index.table)
+ )
+
+ using = index.dialect_options['postgresql']['using']
+ if using:
+ text += "USING %s " % preparer.quote(using)
+
+ ops = index.dialect_options["postgresql"]["ops"]
+ text += "(%s)" \
+ % (
+ ', '.join([
+ self.sql_compiler.process(
+ expr.self_group()
+ if not isinstance(expr, expression.ColumnClause)
+ else expr,
+ include_table=False, literal_binds=True) +
+ (
+ (' ' + ops[expr.key])
+ if hasattr(expr, 'key')
+ and expr.key in ops else ''
+ )
+ for expr in index.expressions
+ ])
+ )
+
+ withclause = index.dialect_options['postgresql']['with']
+
+ if withclause:
+ text += " WITH (%s)" % (', '.join(
+ ['%s = %s' % storage_parameter
+ for storage_parameter in withclause.items()]))
+
+ tablespace_name = index.dialect_options['postgresql']['tablespace']
+
+ if tablespace_name:
+ text += " TABLESPACE %s" % preparer.quote(tablespace_name)
+
+ whereclause = index.dialect_options["postgresql"]["where"]
+
+ if whereclause is not None:
+ where_compiled = self.sql_compiler.process(
+ whereclause, include_table=False,
+ literal_binds=True)
+ text += " WHERE " + where_compiled
+ return text
+
+ def visit_drop_index(self, drop):
+ index = drop.element
+
+ text = "\nDROP INDEX "
+
+ if self.dialect._supports_drop_index_concurrently:
+ concurrently = index.dialect_options['postgresql']['concurrently']
+ if concurrently:
+ text += "CONCURRENTLY "
+
+ text += self._prepared_index_name(index, include_schema=True)
+ return text
+
+ def visit_exclude_constraint(self, constraint, **kw):
+ text = ""
+ if constraint.name is not None:
+ text += "CONSTRAINT %s " % \
+ self.preparer.format_constraint(constraint)
+ elements = []
+ for expr, name, op in constraint._render_exprs:
+ kw['include_table'] = False
+ elements.append(
+ "%s WITH %s" % (self.sql_compiler.process(expr, **kw), op)
+ )
+ text += "EXCLUDE USING %s (%s)" % (constraint.using,
+ ', '.join(elements))
+ if constraint.where is not None:
+ text += ' WHERE (%s)' % self.sql_compiler.process(
+ constraint.where,
+ literal_binds=True)
+ text += self.define_constraint_deferrability(constraint)
+ return text
+
+ def post_create_table(self, table):
+ table_opts = []
+ pg_opts = table.dialect_options['postgresql']
+
+ inherits = pg_opts.get('inherits')
+ if inherits is not None:
+ if not isinstance(inherits, (list, tuple)):
+ inherits = (inherits, )
+ table_opts.append(
+ '\n INHERITS ( ' +
+ ', '.join(self.preparer.quote(name) for name in inherits) +
+ ' )')
+
+ if pg_opts['with_oids'] is True:
+ table_opts.append('\n WITH OIDS')
+ elif pg_opts['with_oids'] is False:
+ table_opts.append('\n WITHOUT OIDS')
+
+ if pg_opts['on_commit']:
+ on_commit_options = pg_opts['on_commit'].replace("_", " ").upper()
+ table_opts.append('\n ON COMMIT %s' % on_commit_options)
+
+ if pg_opts['tablespace']:
+ tablespace_name = pg_opts['tablespace']
+ table_opts.append(
+ '\n TABLESPACE %s' % self.preparer.quote(tablespace_name)
+ )
+
+ return ''.join(table_opts)
+
+
+class PGTypeCompiler(compiler.GenericTypeCompiler):
+    def visit_TSVECTOR(self, type_, **kw):
+ return "TSVECTOR"
+
+ def visit_INET(self, type_, **kw):
+ return "INET"
+
+ def visit_CIDR(self, type_, **kw):
+ return "CIDR"
+
+ def visit_MACADDR(self, type_, **kw):
+ return "MACADDR"
+
+ def visit_OID(self, type_, **kw):
+ return "OID"
+
+ def visit_FLOAT(self, type_, **kw):
+ if not type_.precision:
+ return "FLOAT"
+ else:
+ return "FLOAT(%(precision)s)" % {'precision': type_.precision}
+
+ def visit_DOUBLE_PRECISION(self, type_, **kw):
+ return "DOUBLE PRECISION"
+
+ def visit_BIGINT(self, type_, **kw):
+ return "BIGINT"
+
+ def visit_HSTORE(self, type_, **kw):
+ return "HSTORE"
+
+ def visit_JSON(self, type_, **kw):
+ return "JSON"
+
+ def visit_JSONB(self, type_, **kw):
+ return "JSONB"
+
+ def visit_INT4RANGE(self, type_, **kw):
+ return "INT4RANGE"
+
+ def visit_INT8RANGE(self, type_, **kw):
+ return "INT8RANGE"
+
+ def visit_NUMRANGE(self, type_, **kw):
+ return "NUMRANGE"
+
+ def visit_DATERANGE(self, type_, **kw):
+ return "DATERANGE"
+
+ def visit_TSRANGE(self, type_, **kw):
+ return "TSRANGE"
+
+ def visit_TSTZRANGE(self, type_, **kw):
+ return "TSTZRANGE"
+
+ def visit_datetime(self, type_, **kw):
+ return self.visit_TIMESTAMP(type_, **kw)
+
+ def visit_enum(self, type_, **kw):
+ if not type_.native_enum or not self.dialect.supports_native_enum:
+ return super(PGTypeCompiler, self).visit_enum(type_, **kw)
+ else:
+ return self.visit_ENUM(type_, **kw)
+
+ def visit_ENUM(self, type_, **kw):
+ return self.dialect.identifier_preparer.format_type(type_)
+
+ def visit_TIMESTAMP(self, type_, **kw):
+ return "TIMESTAMP%s %s" % (
+ "(%d)" % type_.precision
+ if getattr(type_, 'precision', None) is not None else "",
+ (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
+ )
+
+ def visit_TIME(self, type_, **kw):
+ return "TIME%s %s" % (
+ "(%d)" % type_.precision
+ if getattr(type_, 'precision', None) is not None else "",
+ (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
+ )
+
+ def visit_INTERVAL(self, type_, **kw):
+ if type_.precision is not None:
+ return "INTERVAL(%d)" % type_.precision
+ else:
+ return "INTERVAL"
+
+ def visit_BIT(self, type_, **kw):
+ if type_.varying:
+ compiled = "BIT VARYING"
+ if type_.length is not None:
+ compiled += "(%d)" % type_.length
+ else:
+ compiled = "BIT(%d)" % type_.length
+ return compiled
+
+ def visit_UUID(self, type_, **kw):
+ return "UUID"
+
+ def visit_large_binary(self, type_, **kw):
+ return self.visit_BYTEA(type_, **kw)
+
+ def visit_BYTEA(self, type_, **kw):
+ return "BYTEA"
+
+ def visit_ARRAY(self, type_, **kw):
+ return self.process(type_.item_type) + ('[]' * (type_.dimensions
+ if type_.dimensions
+ is not None else 1))
+
+
+class PGIdentifierPreparer(compiler.IdentifierPreparer):
+
+ reserved_words = RESERVED_WORDS
+
+ def _unquote_identifier(self, value):
+ if value[0] == self.initial_quote:
+ value = value[1:-1].\
+ replace(self.escape_to_quote, self.escape_quote)
+ return value
+
+ def format_type(self, type_, use_schema=True):
+ if not type_.name:
+ raise exc.CompileError("PostgreSQL ENUM type requires a name.")
+
+ name = self.quote(type_.name)
+ effective_schema = self.schema_for_object(type_)
+
+ if not self.omit_schema and use_schema and \
+ effective_schema is not None:
+ name = self.quote_schema(effective_schema) + "." + name
+ return name
+
+
+class PGInspector(reflection.Inspector):
+
+ def __init__(self, conn):
+ reflection.Inspector.__init__(self, conn)
+
+ def get_table_oid(self, table_name, schema=None):
+ """Return the OID for the given table name."""
+
+ return self.dialect.get_table_oid(self.bind, table_name, schema,
+ info_cache=self.info_cache)
+
+ def get_enums(self, schema=None):
+ """Return a list of ENUM objects.
+
+ Each member is a dictionary containing these fields:
+
+ * name - name of the enum
+ * schema - the schema name for the enum.
+ * visible - boolean, whether or not this enum is visible
+ in the default search path.
+ * labels - a list of string labels that apply to the enum.
+
+ :param schema: schema name. If None, the default schema
+ (typically 'public') is used. May also be set to '*' to
+         load enums for all schemas.
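+
+        For example, with a PostgreSQL engine (connection URL
+        illustrative)::
+
+            from sqlalchemy import create_engine, inspect
+
+            engine = create_engine(
+                "postgresql://user:pass@localhost/test")
+            insp = inspect(engine)
+            for rec in insp.get_enums():
+                print(rec['name'], rec['labels'])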
+
+ .. versionadded:: 1.0.0
+
+ """
+ schema = schema or self.default_schema_name
+ return self.dialect._load_enums(self.bind, schema)
+
+ def get_foreign_table_names(self, schema=None):
+ """Return a list of FOREIGN TABLE names.
+
+ Behavior is similar to that of :meth:`.Inspector.get_table_names`,
+        except that the list is limited to those tables that report a
+ ``relkind`` value of ``f``.
+
+ .. versionadded:: 1.0.0
+
+ """
+ schema = schema or self.default_schema_name
+ return self.dialect._get_foreign_table_names(self.bind, schema)
+
+ def get_view_names(self, schema=None, include=('plain', 'materialized')):
+ """Return all view names in `schema`.
+
+ :param schema: Optional, retrieve names from a non-default schema.
+ For special quoting, use :class:`.quoted_name`.
+
+ :param include: specify which types of views to return. Passed
+ as a string value (for a single type) or a tuple (for any number
+ of types). Defaults to ``('plain', 'materialized')``.
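+
+        For example, to return only materialized views (``insp`` being
+        an :class:`.Inspector`)::
+
+            insp.get_view_names(include='materialized')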
+
+ .. versionadded:: 1.1
+
+ """
+
+ return self.dialect.get_view_names(self.bind, schema,
+ info_cache=self.info_cache,
+ include=include)
+
+
+class CreateEnumType(schema._CreateDropBase):
+ __visit_name__ = "create_enum_type"
+
+
+class DropEnumType(schema._CreateDropBase):
+ __visit_name__ = "drop_enum_type"
+
+
+class PGExecutionContext(default.DefaultExecutionContext):
+
+ def fire_sequence(self, seq, type_):
+ return self._execute_scalar((
+ "select nextval('%s')" %
+ self.dialect.identifier_preparer.format_sequence(seq)), type_)
+
+ def get_insert_default(self, column):
+ if column.primary_key and \
+ column is column.table._autoincrement_column:
+ if column.server_default and column.server_default.has_argument:
+
+ # pre-execute passive defaults on primary key columns
+ return self._execute_scalar("select %s" %
+ column.server_default.arg,
+ column.type)
+
+ elif (column.default is None or
+ (column.default.is_sequence and
+ column.default.optional)):
+
+ # execute the sequence associated with a SERIAL primary
+ # key column. for non-primary-key SERIAL, the ID just
+ # generates server side.
+
+ try:
+ seq_name = column._postgresql_seq_name
+ except AttributeError:
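+                        # mirror PG's default sequence naming: truncate
+                        # the table and column names so that
+                        # "<tab>_<col>_seq" fits the 63-character
+                        # identifier limit (29 + 29 + len("_") +
+                        # len("_seq") == 63)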
+ tab = column.table.name
+ col = column.name
+ tab = tab[0:29 + max(0, (29 - len(col)))]
+ col = col[0:29 + max(0, (29 - len(tab)))]
+ name = "%s_%s_seq" % (tab, col)
+ column._postgresql_seq_name = seq_name = name
+
+ if column.table is not None:
+ effective_schema = self.connection.schema_for_object(
+ column.table)
+ else:
+ effective_schema = None
+
+ if effective_schema is not None:
+ exc = "select nextval('\"%s\".\"%s\"')" % \
+ (effective_schema, seq_name)
+ else:
+ exc = "select nextval('\"%s\"')" % \
+ (seq_name, )
+
+ return self._execute_scalar(exc, column.type)
+
+ return super(PGExecutionContext, self).get_insert_default(column)
+
+ def should_autocommit_text(self, statement):
+ return AUTOCOMMIT_REGEXP.match(statement)
+
+
+class PGDialect(default.DefaultDialect):
+ name = 'postgresql'
+ supports_alter = True
+ max_identifier_length = 63
+ supports_sane_rowcount = True
+
+ supports_native_enum = True
+ supports_native_boolean = True
+ supports_smallserial = True
+
+ supports_sequences = True
+ sequences_optional = True
+ preexecute_autoincrement_sequences = True
+ postfetch_lastrowid = False
+
+ supports_default_values = True
+ supports_empty_insert = False
+ supports_multivalues_insert = True
+ default_paramstyle = 'pyformat'
+ ischema_names = ischema_names
+ colspecs = colspecs
+
+ statement_compiler = PGCompiler
+ ddl_compiler = PGDDLCompiler
+ type_compiler = PGTypeCompiler
+ preparer = PGIdentifierPreparer
+ execution_ctx_cls = PGExecutionContext
+ inspector = PGInspector
+ isolation_level = None
+
+ construct_arguments = [
+ (schema.Index, {
+ "using": False,
+ "where": None,
+ "ops": {},
+ "concurrently": False,
+ "with": {},
+ "tablespace": None
+ }),
+ (schema.Table, {
+ "ignore_search_path": False,
+ "tablespace": None,
+ "with_oids": None,
+ "on_commit": None,
+ "inherits": None
+ }),
+ ]
+
+ reflection_options = ('postgresql_ignore_search_path', )
+
+ _backslash_escapes = True
+ _supports_create_index_concurrently = True
+ _supports_drop_index_concurrently = True
+
+ def __init__(self, isolation_level=None, json_serializer=None,
+ json_deserializer=None, **kwargs):
+ default.DefaultDialect.__init__(self, **kwargs)
+ self.isolation_level = isolation_level
+ self._json_deserializer = json_deserializer
+ self._json_serializer = json_serializer
+
+ def initialize(self, connection):
+ super(PGDialect, self).initialize(connection)
+ self.implicit_returning = self.server_version_info > (8, 2) and \
+ self.__dict__.get('implicit_returning', True)
+ self.supports_native_enum = self.server_version_info >= (8, 3)
+ if not self.supports_native_enum:
+ self.colspecs = self.colspecs.copy()
+ # pop base Enum type
+ self.colspecs.pop(sqltypes.Enum, None)
+ # psycopg2, others may have placed ENUM here as well
+ self.colspecs.pop(ENUM, None)
+
+ # http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
+ self.supports_smallserial = self.server_version_info >= (9, 2)
+
+ self._backslash_escapes = self.server_version_info < (8, 2) or \
+ connection.scalar(
+ "show standard_conforming_strings"
+ ) == 'off'
+
+ self._supports_create_index_concurrently = \
+ self.server_version_info >= (8, 2)
+ self._supports_drop_index_concurrently = \
+ self.server_version_info >= (9, 2)
+
+ def on_connect(self):
+ if self.isolation_level is not None:
+ def connect(conn):
+ self.set_isolation_level(conn, self.isolation_level)
+ return connect
+ else:
+ return None
+
+ _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+ 'READ COMMITTED', 'REPEATABLE READ'])
+
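+    # A sketch of how this path is reached (URL illustrative):
+    #   create_engine("postgresql://user:pass@host/db",
+    #                 isolation_level="REPEATABLE READ")
+    # on_connect() then applies set_isolation_level() to each new
+    # DBAPI connection.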
+ def set_isolation_level(self, connection, level):
+ level = level.replace('_', ' ')
+ if level not in self._isolation_lookup:
+ raise exc.ArgumentError(
+ "Invalid value '%s' for isolation_level. "
+ "Valid isolation levels for %s are %s" %
+ (level, self.name, ", ".join(self._isolation_lookup))
+ )
+ cursor = connection.cursor()
+ cursor.execute(
+ "SET SESSION CHARACTERISTICS AS TRANSACTION "
+ "ISOLATION LEVEL %s" % level)
+ cursor.execute("COMMIT")
+ cursor.close()
+
+ def get_isolation_level(self, connection):
+ cursor = connection.cursor()
+ cursor.execute('show transaction isolation level')
+ val = cursor.fetchone()[0]
+ cursor.close()
+ return val.upper()
+
+ def do_begin_twophase(self, connection, xid):
+ self.do_begin(connection.connection)
+
+ def do_prepare_twophase(self, connection, xid):
+ connection.execute("PREPARE TRANSACTION '%s'" % xid)
+
+ def do_rollback_twophase(self, connection, xid,
+ is_prepared=True, recover=False):
+ if is_prepared:
+ if recover:
+ # FIXME: ugly hack to get out of transaction
+ # context when committing recoverable transactions
+ # Must find out a way how to make the dbapi not
+ # open a transaction.
+ connection.execute("ROLLBACK")
+ connection.execute("ROLLBACK PREPARED '%s'" % xid)
+ connection.execute("BEGIN")
+ self.do_rollback(connection.connection)
+ else:
+ self.do_rollback(connection.connection)
+
+ def do_commit_twophase(self, connection, xid,
+ is_prepared=True, recover=False):
+ if is_prepared:
+ if recover:
+ connection.execute("ROLLBACK")
+ connection.execute("COMMIT PREPARED '%s'" % xid)
+ connection.execute("BEGIN")
+ self.do_rollback(connection.connection)
+ else:
+ self.do_commit(connection.connection)
+
+ def do_recover_twophase(self, connection):
+ resultset = connection.execute(
+ sql.text("SELECT gid FROM pg_prepared_xacts"))
+ return [row[0] for row in resultset]
+
+ def _get_default_schema_name(self, connection):
+ return connection.scalar("select current_schema()")
+
+ def has_schema(self, connection, schema):
+ query = ("select nspname from pg_namespace "
+ "where lower(nspname)=:schema")
+ cursor = connection.execute(
+ sql.text(
+ query,
+ bindparams=[
+ sql.bindparam(
+ 'schema', util.text_type(schema.lower()),
+ type_=sqltypes.Unicode)]
+ )
+ )
+
+ return bool(cursor.first())
+
+ def has_table(self, connection, table_name, schema=None):
+ # seems like case gets folded in pg_class...
+ if schema is None:
+ cursor = connection.execute(
+ sql.text(
+ "select relname from pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where "
+ "pg_catalog.pg_table_is_visible(c.oid) "
+ "and relname=:name",
+ bindparams=[
+ sql.bindparam('name', util.text_type(table_name),
+ type_=sqltypes.Unicode)]
+ )
+ )
+ else:
+ cursor = connection.execute(
+ sql.text(
+ "select relname from pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where n.nspname=:schema and "
+ "relname=:name",
+ bindparams=[
+ sql.bindparam('name',
+ util.text_type(table_name),
+ type_=sqltypes.Unicode),
+ sql.bindparam('schema',
+ util.text_type(schema),
+ type_=sqltypes.Unicode)]
+ )
+ )
+ return bool(cursor.first())
+
+ def has_sequence(self, connection, sequence_name, schema=None):
+ if schema is None:
+ cursor = connection.execute(
+ sql.text(
+ "SELECT relname FROM pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where relkind='S' and "
+ "n.nspname=current_schema() "
+ "and relname=:name",
+ bindparams=[
+ sql.bindparam('name', util.text_type(sequence_name),
+ type_=sqltypes.Unicode)
+ ]
+ )
+ )
+ else:
+ cursor = connection.execute(
+ sql.text(
+ "SELECT relname FROM pg_class c join pg_namespace n on "
+ "n.oid=c.relnamespace where relkind='S' and "
+ "n.nspname=:schema and relname=:name",
+ bindparams=[
+ sql.bindparam('name', util.text_type(sequence_name),
+ type_=sqltypes.Unicode),
+ sql.bindparam('schema',
+ util.text_type(schema),
+ type_=sqltypes.Unicode)
+ ]
+ )
+ )
+
+ return bool(cursor.first())
+
+ def has_type(self, connection, type_name, schema=None):
+ if schema is not None:
+ query = """
+ SELECT EXISTS (
+ SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
+ WHERE t.typnamespace = n.oid
+ AND t.typname = :typname
+ AND n.nspname = :nspname
+ )
+ """
+ query = sql.text(query)
+ else:
+ query = """
+ SELECT EXISTS (
+ SELECT * FROM pg_catalog.pg_type t
+ WHERE t.typname = :typname
+ AND pg_type_is_visible(t.oid)
+ )
+ """
+ query = sql.text(query)
+ query = query.bindparams(
+ sql.bindparam('typname',
+ util.text_type(type_name), type_=sqltypes.Unicode),
+ )
+ if schema is not None:
+ query = query.bindparams(
+ sql.bindparam('nspname',
+ util.text_type(schema), type_=sqltypes.Unicode),
+ )
+ cursor = connection.execute(query)
+ return bool(cursor.scalar())
+
+ def _get_server_version_info(self, connection):
+ v = connection.execute("select version()").scalar()
+ m = re.match(
+ r'.*(?:PostgreSQL|EnterpriseDB) '
+ r'(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel)?',
+ v)
+ if not m:
+ raise AssertionError(
+ "Could not determine version from string '%s'" % v)
+ return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
+
+ @reflection.cache
+ def get_table_oid(self, connection, table_name, schema=None, **kw):
+ """Fetch the oid for schema.table_name.
+
+ Several reflection methods require the table oid. The idea for using
+ this method is that it can be fetched one time and cached for
+ subsequent calls.
+
+ """
+ table_oid = None
+ if schema is not None:
+ schema_where_clause = "n.nspname = :schema"
+ else:
+ schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
+ query = """
+ SELECT c.oid
+ FROM pg_catalog.pg_class c
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE (%s)
+ AND c.relname = :table_name AND c.relkind in ('r', 'v', 'm', 'f')
+ """ % schema_where_clause
+ # Since we're binding to unicode, table_name and schema_name must be
+ # unicode.
+ table_name = util.text_type(table_name)
+ if schema is not None:
+ schema = util.text_type(schema)
+ s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
+ s = s.columns(oid=sqltypes.Integer)
+ if schema:
+ s = s.bindparams(sql.bindparam('schema', type_=sqltypes.Unicode))
+ c = connection.execute(s, table_name=table_name, schema=schema)
+ table_oid = c.scalar()
+ if table_oid is None:
+ raise exc.NoSuchTableError(table_name)
+ return table_oid
+
+ @reflection.cache
+ def get_schema_names(self, connection, **kw):
+ result = connection.execute(
+ sql.text("SELECT nspname FROM pg_namespace "
+ "WHERE nspname NOT LIKE 'pg_%' "
+ "ORDER BY nspname"
+ ).columns(nspname=sqltypes.Unicode))
+ return [name for name, in result]
+
+ @reflection.cache
+ def get_table_names(self, connection, schema=None, **kw):
+ result = connection.execute(
+ sql.text("SELECT c.relname FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relkind = 'r'"
+ ).columns(relname=sqltypes.Unicode),
+ schema=schema if schema is not None else self.default_schema_name)
+ return [name for name, in result]
+
+ @reflection.cache
+ def _get_foreign_table_names(self, connection, schema=None, **kw):
+ result = connection.execute(
+ sql.text("SELECT c.relname FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relkind = 'f'"
+ ).columns(relname=sqltypes.Unicode),
+ schema=schema if schema is not None else self.default_schema_name)
+ return [name for name, in result]
+
+ @reflection.cache
+ def get_view_names(
+ self, connection, schema=None,
+ include=('plain', 'materialized'), **kw):
+
+ include_kind = {'plain': 'v', 'materialized': 'm'}
+ try:
+ kinds = [include_kind[i] for i in util.to_list(include)]
+ except KeyError:
+ raise ValueError(
+ "include %r unknown, needs to be a sequence containing "
+ "one or both of 'plain' and 'materialized'" % (include,))
+ if not kinds:
+ raise ValueError(
+ "empty include, needs to be a sequence containing "
+ "one or both of 'plain' and 'materialized'")
+
+ result = connection.execute(
+ sql.text("SELECT c.relname FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relkind IN (%s)" %
+ (", ".join("'%s'" % elem for elem in kinds))
+ ).columns(relname=sqltypes.Unicode),
+ schema=schema if schema is not None else self.default_schema_name)
+ return [name for name, in result]
+
+ @reflection.cache
+ def get_view_definition(self, connection, view_name, schema=None, **kw):
+ view_def = connection.scalar(
+ sql.text("SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relname = :view_name "
+ "AND c.relkind IN ('v', 'm')"
+ ).columns(view_def=sqltypes.Unicode),
+ schema=schema if schema is not None else self.default_schema_name,
+ view_name=view_name)
+ return view_def
+
+ @reflection.cache
+ def get_columns(self, connection, table_name, schema=None, **kw):
+
+ table_oid = self.get_table_oid(connection, table_name, schema,
+ info_cache=kw.get('info_cache'))
+ SQL_COLS = """
+ SELECT a.attname,
+ pg_catalog.format_type(a.atttypid, a.atttypmod),
+ (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
+ FROM pg_catalog.pg_attrdef d
+ WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
+ AND a.atthasdef)
+ AS DEFAULT,
+ a.attnotnull, a.attnum, a.attrelid as table_oid
+ FROM pg_catalog.pg_attribute a
+ WHERE a.attrelid = :table_oid
+ AND a.attnum > 0 AND NOT a.attisdropped
+ ORDER BY a.attnum
+ """
+ s = sql.text(SQL_COLS,
+ bindparams=[
+ sql.bindparam('table_oid', type_=sqltypes.Integer)],
+ typemap={
+ 'attname': sqltypes.Unicode,
+ 'default': sqltypes.Unicode}
+ )
+ c = connection.execute(s, table_oid=table_oid)
+ rows = c.fetchall()
+ domains = self._load_domains(connection)
+ enums = dict(
+ (
+ "%s.%s" % (rec['schema'], rec['name'])
+ if not rec['visible'] else rec['name'], rec) for rec in
+ self._load_enums(connection, schema='*')
+ )
+
+ # format columns
+ columns = []
+ for name, format_type, default, notnull, attnum, table_oid in rows:
+ column_info = self._get_column_info(
+ name, format_type, default, notnull, domains, enums, schema)
+ columns.append(column_info)
+ return columns
+
+ def _get_column_info(self, name, format_type, default,
+ notnull, domains, enums, schema):
+ # strip (*) from character varying(5), timestamp(5)
+ # with time zone, geometry(POLYGON), etc.
+ attype = re.sub(r'\(.*\)', '', format_type)
+
+ # strip '[]' from integer[], etc.
+ attype = attype.replace('[]', '')
+
+ nullable = not notnull
+ is_array = format_type.endswith('[]')
+ charlen = re.search(r'\(([\d,]+)\)', format_type)
+ if charlen:
+ charlen = charlen.group(1)
+ args = re.search(r'\((.*)\)', format_type)
+ if args and args.group(1):
+ args = tuple(re.split(r'\s*,\s*', args.group(1)))
+ else:
+ args = ()
+ kwargs = {}
+
+ if attype == 'numeric':
+ if charlen:
+ prec, scale = charlen.split(',')
+ args = (int(prec), int(scale))
+ else:
+ args = ()
+ elif attype == 'double precision':
+ args = (53, )
+ elif attype == 'integer':
+ args = ()
+ elif attype in ('timestamp with time zone',
+ 'time with time zone'):
+ kwargs['timezone'] = True
+ if charlen:
+ kwargs['precision'] = int(charlen)
+ args = ()
+ elif attype in ('timestamp without time zone',
+ 'time without time zone', 'time'):
+ kwargs['timezone'] = False
+ if charlen:
+ kwargs['precision'] = int(charlen)
+ args = ()
+ elif attype == 'bit varying':
+ kwargs['varying'] = True
+ if charlen:
+ args = (int(charlen),)
+ else:
+ args = ()
+ elif attype in ('interval', 'interval year to month',
+ 'interval day to second'):
+ if charlen:
+ kwargs['precision'] = int(charlen)
+ args = ()
+ elif charlen:
+ args = (int(charlen),)
+
+ while True:
+ if attype in self.ischema_names:
+ coltype = self.ischema_names[attype]
+ break
+ elif attype in enums:
+ enum = enums[attype]
+ coltype = ENUM
+ kwargs['name'] = enum['name']
+ if not enum['visible']:
+ kwargs['schema'] = enum['schema']
+ args = tuple(enum['labels'])
+ break
+ elif attype in domains:
+ domain = domains[attype]
+ attype = domain['attype']
+ # A table can't override whether the domain is nullable.
+ nullable = domain['nullable']
+ if domain['default'] and not default:
+ # It can, however, override the default
+ # value, but can't set it to null.
+ default = domain['default']
+ continue
+ else:
+ coltype = None
+ break
+
+ if coltype:
+ coltype = coltype(*args, **kwargs)
+ if is_array:
+ coltype = self.ischema_names['_array'](coltype)
+ else:
+ util.warn("Did not recognize type '%s' of column '%s'" %
+ (attype, name))
+ coltype = sqltypes.NULLTYPE
+ # adjust the default value
+ autoincrement = False
+ if default is not None:
+ match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
+ if match is not None:
+ if issubclass(coltype._type_affinity, sqltypes.Integer):
+ autoincrement = True
+ # the default is related to a Sequence
+ sch = schema
+ if '.' not in match.group(2) and sch is not None:
+ # unconditionally quote the schema name. this could
+ # later be enhanced to obey quoting rules /
+ # "quote schema"
+ default = match.group(1) + \
+ ('"%s"' % sch) + '.' + \
+ match.group(2) + match.group(3)
+
+ column_info = dict(name=name, type=coltype, nullable=nullable,
+ default=default, autoincrement=autoincrement)
+ return column_info
+
+ @reflection.cache
+ def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+ table_oid = self.get_table_oid(connection, table_name, schema,
+ info_cache=kw.get('info_cache'))
+
+ if self.server_version_info < (8, 4):
+ PK_SQL = """
+ SELECT a.attname
+ FROM
+ pg_class t
+ join pg_index ix on t.oid = ix.indrelid
+ join pg_attribute a
+ on t.oid=a.attrelid AND %s
+ WHERE
+ t.oid = :table_oid and ix.indisprimary = 't'
+ ORDER BY a.attnum
+ """ % self._pg_index_any("a.attnum", "ix.indkey")
+
+ else:
+ # unnest() and generate_subscripts() both introduced in
+ # version 8.4
+ PK_SQL = """
+ SELECT a.attname
+ FROM pg_attribute a JOIN (
+ SELECT unnest(ix.indkey) attnum,
+ generate_subscripts(ix.indkey, 1) ord
+ FROM pg_index ix
+ WHERE ix.indrelid = :table_oid AND ix.indisprimary
+ ) k ON a.attnum=k.attnum
+ WHERE a.attrelid = :table_oid
+ ORDER BY k.ord
+ """
+ t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
+ c = connection.execute(t, table_oid=table_oid)
+ cols = [r[0] for r in c.fetchall()]
+
+ PK_CONS_SQL = """
+ SELECT conname
+ FROM pg_catalog.pg_constraint r
+ WHERE r.conrelid = :table_oid AND r.contype = 'p'
+ ORDER BY 1
+ """
+ t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode})
+ c = connection.execute(t, table_oid=table_oid)
+ name = c.scalar()
+
+ return {'constrained_columns': cols, 'name': name}
+
+ @reflection.cache
+ def get_foreign_keys(self, connection, table_name, schema=None,
+ postgresql_ignore_search_path=False, **kw):
+ preparer = self.identifier_preparer
+ table_oid = self.get_table_oid(connection, table_name, schema,
+ info_cache=kw.get('info_cache'))
+
+ FK_SQL = """
+ SELECT r.conname,
+ pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
+ n.nspname as conschema
+ FROM pg_catalog.pg_constraint r,
+ pg_namespace n,
+ pg_class c
+
+ WHERE r.conrelid = :table AND
+ r.contype = 'f' AND
+ c.oid = confrelid AND
+ n.oid = c.relnamespace
+ ORDER BY 1
+ """
+ # http://www.postgresql.org/docs/9.0/static/sql-createtable.html
+ FK_REGEX = re.compile(
+ r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)'
+ r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?'
+ r'[\s]?(ON UPDATE '
+ r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
+ r'[\s]?(ON DELETE '
+ r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
+ r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?'
+ r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?'
+ )
+
+ t = sql.text(FK_SQL, typemap={
+ 'conname': sqltypes.Unicode,
+ 'condef': sqltypes.Unicode})
+ c = connection.execute(t, table=table_oid)
+ fkeys = []
+ for conname, condef, conschema in c.fetchall():
+ m = re.search(FK_REGEX, condef).groups()
+
+ constrained_columns, referred_schema, \
+ referred_table, referred_columns, \
+ _, match, _, onupdate, _, ondelete, \
+ deferrable, _, initially = m
+
+ if deferrable is not None:
+ deferrable = True if deferrable == 'DEFERRABLE' else False
+ constrained_columns = [preparer._unquote_identifier(x)
+ for x in re.split(
+ r'\s*,\s*', constrained_columns)]
+
+ if postgresql_ignore_search_path:
+ # when ignoring search path, we use the actual schema
+ # provided it isn't the "default" schema
+ if conschema != self.default_schema_name:
+ referred_schema = conschema
+ else:
+ referred_schema = schema
+ elif referred_schema:
+ # referred_schema is the schema that we regexp'ed from
+ # pg_get_constraintdef(). If the schema is in the search
+ # path, pg_get_constraintdef() will give us None.
+ referred_schema = \
+ preparer._unquote_identifier(referred_schema)
+ elif schema is not None and schema == conschema:
+ # If the actual schema matches the schema of the table
+ # we're reflecting, then we will use that.
+ referred_schema = schema
+
+ referred_table = preparer._unquote_identifier(referred_table)
+ referred_columns = [preparer._unquote_identifier(x)
+ for x in
+                                re.split(r'\s*,\s*', referred_columns)]
+ fkey_d = {
+ 'name': conname,
+ 'constrained_columns': constrained_columns,
+ 'referred_schema': referred_schema,
+ 'referred_table': referred_table,
+ 'referred_columns': referred_columns,
+ 'options': {
+ 'onupdate': onupdate,
+ 'ondelete': ondelete,
+ 'deferrable': deferrable,
+ 'initially': initially,
+ 'match': match
+ }
+ }
+ fkeys.append(fkey_d)
+ return fkeys
+
+ def _pg_index_any(self, col, compare_to):
+ if self.server_version_info < (8, 1):
+ # http://www.postgresql.org/message-id/10279.1124395722@sss.pgh.pa.us
+ # "In CVS tip you could replace this with "attnum = ANY (indkey)".
+ # Unfortunately, most array support doesn't work on int2vector in
+ # pre-8.1 releases, so I think you're kinda stuck with the above
+ # for now.
+ # regards, tom lane"
+ return "(%s)" % " OR ".join(
+ "%s[%d] = %s" % (compare_to, ind, col)
+ for ind in range(0, 10)
+ )
+ else:
+ return "%s = ANY(%s)" % (col, compare_to)
+
+ @reflection.cache
+ def get_indexes(self, connection, table_name, schema, **kw):
+ table_oid = self.get_table_oid(connection, table_name, schema,
+ info_cache=kw.get('info_cache'))
+
+ # cast indkey as varchar since it's an int2vector,
+ # returned as a list by some drivers such as pypostgresql
+
+ if self.server_version_info < (8, 5):
+ IDX_SQL = """
+ SELECT
+ i.relname as relname,
+ ix.indisunique, ix.indexprs, ix.indpred,
+ a.attname, a.attnum, NULL, ix.indkey%s,
+ %s, am.amname
+ FROM
+ pg_class t
+ join pg_index ix on t.oid = ix.indrelid
+ join pg_class i on i.oid = ix.indexrelid
+ left outer join
+ pg_attribute a
+ on t.oid = a.attrelid and %s
+ left outer join
+ pg_am am
+ on i.relam = am.oid
+ WHERE
+ t.relkind IN ('r', 'v', 'f', 'm')
+ and t.oid = :table_oid
+ and ix.indisprimary = 'f'
+ ORDER BY
+ t.relname,
+ i.relname
+ """ % (
+ # version 8.3 here was based on observing the
+ # cast does not work in PG 8.2.4, does work in 8.3.0.
+ # nothing in PG changelogs regarding this.
+ "::varchar" if self.server_version_info >= (8, 3) else "",
+ "i.reloptions" if self.server_version_info >= (8, 2)
+ else "NULL",
+ self._pg_index_any("a.attnum", "ix.indkey")
+ )
+ else:
+ IDX_SQL = """
+ SELECT
+ i.relname as relname,
+ ix.indisunique, ix.indexprs, ix.indpred,
+ a.attname, a.attnum, c.conrelid, ix.indkey::varchar,
+ i.reloptions, am.amname
+ FROM
+ pg_class t
+ join pg_index ix on t.oid = ix.indrelid
+ join pg_class i on i.oid = ix.indexrelid
+ left outer join
+ pg_attribute a
+ on t.oid = a.attrelid and a.attnum = ANY(ix.indkey)
+ left outer join
+ pg_constraint c
+ on (ix.indrelid = c.conrelid and
+ ix.indexrelid = c.conindid and
+ c.contype in ('p', 'u', 'x'))
+ left outer join
+ pg_am am
+ on i.relam = am.oid
+ WHERE
+ t.relkind IN ('r', 'v', 'f', 'm')
+ and t.oid = :table_oid
+ and ix.indisprimary = 'f'
+ ORDER BY
+ t.relname,
+ i.relname
+ """
+
+ t = sql.text(IDX_SQL, typemap={
+ 'relname': sqltypes.Unicode,
+ 'attname': sqltypes.Unicode})
+ c = connection.execute(t, table_oid=table_oid)
+
+ indexes = defaultdict(lambda: defaultdict(dict))
+
+ sv_idx_name = None
+ for row in c.fetchall():
+ (idx_name, unique, expr, prd, col,
+ col_num, conrelid, idx_key, options, amname) = row
+
+ if expr:
+ if idx_name != sv_idx_name:
+ util.warn(
+ "Skipped unsupported reflection of "
+ "expression-based index %s"
+ % idx_name)
+ sv_idx_name = idx_name
+ continue
+
+ if prd and not idx_name == sv_idx_name:
+ util.warn(
+ "Predicate of partial index %s ignored during reflection"
+ % idx_name)
+ sv_idx_name = idx_name
+
+ has_idx = idx_name in indexes
+ index = indexes[idx_name]
+ if col is not None:
+ index['cols'][col_num] = col
+ if not has_idx:
+ index['key'] = [int(k.strip()) for k in idx_key.split()]
+ index['unique'] = unique
+ if conrelid is not None:
+ index['duplicates_constraint'] = idx_name
+ if options:
+ index['options'] = dict(
+ [option.split("=") for option in options])
+
+ # it *might* be nice to include that this is 'btree' in the
+ # reflection info. But we don't want an Index object
+ # to have a ``postgresql_using`` in it that is just the
+ # default, so for the moment leaving this out.
+ if amname and amname != 'btree':
+ index['amname'] = amname
+
+ result = []
+ for name, idx in indexes.items():
+ entry = {
+ 'name': name,
+ 'unique': idx['unique'],
+ 'column_names': [idx['cols'][i] for i in idx['key']]
+ }
+ if 'duplicates_constraint' in idx:
+ entry['duplicates_constraint'] = idx['duplicates_constraint']
+ if 'options' in idx:
+ entry.setdefault(
+ 'dialect_options', {})["postgresql_with"] = idx['options']
+ if 'amname' in idx:
+ entry.setdefault(
+ 'dialect_options', {})["postgresql_using"] = idx['amname']
+ result.append(entry)
+ return result
+
+ @reflection.cache
+ def get_unique_constraints(self, connection, table_name,
+ schema=None, **kw):
+ table_oid = self.get_table_oid(connection, table_name, schema,
+ info_cache=kw.get('info_cache'))
+
+ UNIQUE_SQL = """
+ SELECT
+ cons.conname as name,
+ cons.conkey as key,
+ a.attnum as col_num,
+ a.attname as col_name
+ FROM
+ pg_catalog.pg_constraint cons
+ join pg_attribute a
+ on cons.conrelid = a.attrelid AND
+ a.attnum = ANY(cons.conkey)
+ WHERE
+ cons.conrelid = :table_oid AND
+ cons.contype = 'u'
+ """
+
+ t = sql.text(UNIQUE_SQL, typemap={'col_name': sqltypes.Unicode})
+ c = connection.execute(t, table_oid=table_oid)
+
+ uniques = defaultdict(lambda: defaultdict(dict))
+ for row in c.fetchall():
+ uc = uniques[row.name]
+ uc["key"] = row.key
+ uc["cols"][row.col_num] = row.col_name
+
+ return [
+ {'name': name,
+ 'column_names': [uc["cols"][i] for i in uc["key"]]}
+ for name, uc in uniques.items()
+ ]
+
+ @reflection.cache
+ def get_check_constraints(
+ self, connection, table_name, schema=None, **kw):
+ table_oid = self.get_table_oid(connection, table_name, schema,
+ info_cache=kw.get('info_cache'))
+
+ CHECK_SQL = """
+ SELECT
+ cons.conname as name,
+ cons.consrc as src
+ FROM
+ pg_catalog.pg_constraint cons
+ WHERE
+ cons.conrelid = :table_oid AND
+ cons.contype = 'c'
+ """
+
+ c = connection.execute(sql.text(CHECK_SQL), table_oid=table_oid)
+
+ return [
+ {'name': name,
+ 'sqltext': src[1:-1]}
+ for name, src in c.fetchall()
+ ]
+
+ def _load_enums(self, connection, schema=None):
+ schema = schema or self.default_schema_name
+ if not self.supports_native_enum:
+ return {}
+
+ # Load data types for enums:
+ SQL_ENUMS = """
+ SELECT t.typname as "name",
+ -- no enum defaults in 8.4 at least
+ -- t.typdefault as "default",
+ pg_catalog.pg_type_is_visible(t.oid) as "visible",
+ n.nspname as "schema",
+ e.enumlabel as "label"
+ FROM pg_catalog.pg_type t
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
+ WHERE t.typtype = 'e'
+ """
+
+ if schema != '*':
+ SQL_ENUMS += "AND n.nspname = :schema "
+
+ # e.oid gives us label order within an enum
+ SQL_ENUMS += 'ORDER BY "schema", "name", e.oid'
+
+ s = sql.text(SQL_ENUMS, typemap={
+ 'attname': sqltypes.Unicode,
+ 'label': sqltypes.Unicode})
+
+ if schema != '*':
+ s = s.bindparams(schema=schema)
+
+ c = connection.execute(s)
+
+ enums = []
+ enum_by_name = {}
+ for enum in c.fetchall():
+ key = (enum['schema'], enum['name'])
+ if key in enum_by_name:
+ enum_by_name[key]['labels'].append(enum['label'])
+ else:
+ enum_by_name[key] = enum_rec = {
+ 'name': enum['name'],
+ 'schema': enum['schema'],
+ 'visible': enum['visible'],
+ 'labels': [enum['label']],
+ }
+ enums.append(enum_rec)
+
+ return enums
+
+ def _load_domains(self, connection):
+ # Load data types for domains:
+ SQL_DOMAINS = """
+ SELECT t.typname as "name",
+ pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
+ not t.typnotnull as "nullable",
+ t.typdefault as "default",
+ pg_catalog.pg_type_is_visible(t.oid) as "visible",
+ n.nspname as "schema"
+ FROM pg_catalog.pg_type t
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ WHERE t.typtype = 'd'
+ """
+
+ s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode})
+ c = connection.execute(s)
+
+ domains = {}
+ for domain in c.fetchall():
+ # strip (30) from character varying(30)
+ attype = re.search(r'([^\(]+)', domain['attype']).group(1)
+ if domain['visible']:
+ # 'visible' just means whether or not the domain is in a
+ # schema that's on the search path -- or not overridden by
+ # a schema with higher precedence. If it's not visible,
+ # it will be prefixed with the schema-name when it's used.
+ name = domain['name']
+ else:
+ name = "%s.%s" % (domain['schema'], domain['name'])
+
+ domains[name] = {
+ 'attype': attype,
+ 'nullable': domain['nullable'],
+ 'default': domain['default']
+ }
+
+ return domains
diff --git a/app/lib/sqlalchemy/dialects/postgresql/dml.py b/app/lib/sqlalchemy/dialects/postgresql/dml.py
new file mode 100644
index 0000000..bfdfbfa
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/dml.py
@@ -0,0 +1,213 @@
+# postgresql/dml.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from ...sql.elements import ClauseElement, _literal_as_binds
+from ...sql.dml import Insert as StandardInsert
+from ...sql.expression import alias
+from ...sql import schema
+from ...util.langhelpers import public_factory
+from ...sql.base import _generative
+from ... import util
+from . import ext
+
+__all__ = ('Insert', 'insert')
+
+
+class Insert(StandardInsert):
+ """PostgreSQL-specific implementation of INSERT.
+
+ Adds methods for PG-specific syntaxes such as ON CONFLICT.
+
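+    E.g. (table name illustrative)::
+
+        from sqlalchemy.dialects.postgresql import insert
+
+        stmt = insert(my_table).values(id=1, data='some data')
+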
+ .. versionadded:: 1.1
+
+ """
+
+ @util.memoized_property
+ def excluded(self):
+ """Provide the ``excluded`` namespace for an ON CONFLICT statement
+
+        PG's ON CONFLICT clause allows reference to the row that would
+        be inserted, known as ``excluded``. This attribute provides
+        all columns of that row so they can be referenced.
+
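+        For example (table and column names illustrative)::
+
+            stmt = insert(my_table).values(id=1, data='new data')
+            stmt = stmt.on_conflict_do_update(
+                index_elements=['id'],
+                set_=dict(data=stmt.excluded.data)
+            )
+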
+ .. seealso::
+
+ :ref:`postgresql_insert_on_conflict` - example of how
+ to use :attr:`.Insert.excluded`
+
+ """
+ return alias(self.table, name='excluded').columns
+
+ @_generative
+ def on_conflict_do_update(
+ self,
+ constraint=None, index_elements=None,
+ index_where=None, set_=None, where=None):
+ """
+ Specifies a DO UPDATE SET action for ON CONFLICT clause.
+
+ Either the ``constraint`` or ``index_elements`` argument is
+ required, but only one of these can be specified.
+
+ :param constraint:
+ The name of a unique or exclusion constraint on the table,
+ or the constraint object itself if it has a .name attribute.
+
+ :param index_elements:
+ A sequence consisting of string column names, :class:`.Column`
+ objects, or other column expression objects that will be used
+ to infer a target index.
+
+ :param index_where:
+ Additional WHERE criterion that can be used to infer a
+ conditional target index.
+
+ :param set_:
+ Required argument. A dictionary or other mapping object
+ with column names as keys and expressions or literals as values,
+ specifying the ``SET`` actions to take.
+ If the target :class:`.Column` specifies a ".key" attribute distinct
+ from the column name, that key should be used.
+
+ .. warning:: This dictionary does **not** take into account
+ Python-specified default UPDATE values or generation functions,
+ e.g. those specified using :paramref:`.Column.onupdate`.
+ These values will not be exercised for an ON CONFLICT style of
+ UPDATE, unless they are manually specified in the
+ :paramref:`.Insert.on_conflict_do_update.set_` dictionary.
+
+ :param where:
+ Optional argument. If present, can be a literal SQL
+ string or an acceptable expression for a ``WHERE`` clause
+ that restricts the rows affected by ``DO UPDATE SET``. Rows
+ not meeting the ``WHERE`` condition will not be updated
+ (effectively a ``DO NOTHING`` for those rows).
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`postgresql_insert_on_conflict`
+
+ """
+ self._post_values_clause = OnConflictDoUpdate(
+ constraint, index_elements, index_where, set_, where)
+ return self
+
+ @_generative
+ def on_conflict_do_nothing(
+ self,
+ constraint=None, index_elements=None, index_where=None):
+ """
+ Specifies a DO NOTHING action for ON CONFLICT clause.
+
+ The ``constraint`` and ``index_elements`` arguments
+ are optional, but only one of these can be specified.
+
+ :param constraint:
+ The name of a unique or exclusion constraint on the table,
+ or the constraint object itself if it has a .name attribute.
+
+ :param index_elements:
+ A sequence consisting of string column names, :class:`.Column`
+ objects, or other column expression objects that will be used
+ to infer a target index.
+
+ :param index_where:
+ Additional WHERE criterion that can be used to infer a
+ conditional target index.
+
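+        For example (index element name illustrative)::
+
+            stmt = insert(my_table).values(id=1, data='x')
+            stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
+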
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`postgresql_insert_on_conflict`
+
+ """
+ self._post_values_clause = OnConflictDoNothing(
+ constraint, index_elements, index_where)
+ return self
+
+insert = public_factory(Insert, '.dialects.postgresql.insert')
+
+
+class OnConflictClause(ClauseElement):
+ def __init__(
+ self,
+ constraint=None,
+ index_elements=None,
+ index_where=None):
+
+ if constraint is not None:
+ if not isinstance(constraint, util.string_types) and \
+ isinstance(constraint, (
+ schema.Index, schema.Constraint,
+ ext.ExcludeConstraint)):
+ constraint = getattr(constraint, 'name') or constraint
+
+ if constraint is not None:
+ if index_elements is not None:
+ raise ValueError(
+ "'constraint' and 'index_elements' are mutually exclusive")
+
+ if isinstance(constraint, util.string_types):
+ self.constraint_target = constraint
+ self.inferred_target_elements = None
+ self.inferred_target_whereclause = None
+ elif isinstance(constraint, schema.Index):
+ index_elements = constraint.expressions
+ index_where = \
+ constraint.dialect_options['postgresql'].get("where")
+ elif isinstance(constraint, ext.ExcludeConstraint):
+ index_elements = constraint.columns
+ index_where = constraint.where
+ else:
+ index_elements = constraint.columns
+ index_where = \
+ constraint.dialect_options['postgresql'].get("where")
+
+ if index_elements is not None:
+ self.constraint_target = None
+ self.inferred_target_elements = index_elements
+ self.inferred_target_whereclause = index_where
+ elif constraint is None:
+ self.constraint_target = self.inferred_target_elements = \
+ self.inferred_target_whereclause = None
+
+
+class OnConflictDoNothing(OnConflictClause):
+ __visit_name__ = 'on_conflict_do_nothing'
+
+
+class OnConflictDoUpdate(OnConflictClause):
+ __visit_name__ = 'on_conflict_do_update'
+
+ def __init__(
+ self,
+ constraint=None,
+ index_elements=None,
+ index_where=None,
+ set_=None,
+ where=None):
+ super(OnConflictDoUpdate, self).__init__(
+ constraint=constraint,
+ index_elements=index_elements,
+ index_where=index_where)
+
+ if self.inferred_target_elements is None and \
+ self.constraint_target is None:
+ raise ValueError(
+ "Either constraint or index_elements, "
+ "but not both, must be specified unless DO NOTHING")
+
+ if (not isinstance(set_, dict) or not set_):
+ raise ValueError("set parameter must be a non-empty dictionary")
+ self.update_values_to_set = [
+ (key, value)
+ for key, value in set_.items()
+ ]
+ self.update_whereclause = where
diff --git a/app/lib/sqlalchemy/dialects/postgresql/ext.py b/app/lib/sqlalchemy/dialects/postgresql/ext.py
new file mode 100644
index 0000000..55eded9
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/ext.py
@@ -0,0 +1,218 @@
+# postgresql/ext.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from ...sql import expression
+from ...sql import elements
+from ...sql import functions
+from ...sql.schema import ColumnCollectionConstraint
+from .array import ARRAY
+
+
+class aggregate_order_by(expression.ColumnElement):
+ """Represent a PostgreSQL aggregate order by expression.
+
+ E.g.::
+
+ from sqlalchemy.dialects.postgresql import aggregate_order_by
+ expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
+ stmt = select([expr])
+
+ would represent the expression::
+
+ SELECT array_agg(a ORDER BY b DESC) FROM table;
+
+ Similarly::
+
+ expr = func.string_agg(
+ table.c.a,
+ aggregate_order_by(literal_column("','"), table.c.a)
+ )
+ stmt = select([expr])
+
+ Would represent::
+
+ SELECT string_agg(a, ',' ORDER BY a) FROM table;
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :class:`.array_agg`
+
+ """
+
+ __visit_name__ = 'aggregate_order_by'
+
+ def __init__(self, target, order_by):
+ self.target = elements._literal_as_binds(target)
+ self.order_by = elements._literal_as_binds(order_by)
+
+ def self_group(self, against=None):
+ return self
+
+ def get_children(self, **kwargs):
+ return self.target, self.order_by
+
+ def _copy_internals(self, clone=elements._clone, **kw):
+ self.target = clone(self.target, **kw)
+ self.order_by = clone(self.order_by, **kw)
+
+ @property
+ def _from_objects(self):
+ return self.target._from_objects + self.order_by._from_objects
+
+
+class ExcludeConstraint(ColumnCollectionConstraint):
+ """A table-level EXCLUDE constraint.
+
+ Defines an EXCLUDE constraint as described in the `postgres
+ documentation`__.
+
+ __ http://www.postgresql.org/docs/9.0/\
+static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
+ """
+
+ __visit_name__ = 'exclude_constraint'
+
+ where = None
+
+ def __init__(self, *elements, **kw):
+ r"""
+ Create an :class:`.ExcludeConstraint` object.
+
+ E.g.::
+
+ const = ExcludeConstraint(
+ (Column('period'), '&&'),
+ (Column('group'), '='),
+ where=(Column('group') != 'some group')
+ )
+
+ The constraint is normally embedded into the :class:`.Table` construct
+ directly, or added later using :meth:`.append_constraint`::
+
+ some_table = Table(
+ 'some_table', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('period', TSRANGE()),
+ Column('group', String)
+ )
+
+ some_table.append_constraint(
+ ExcludeConstraint(
+ (some_table.c.period, '&&'),
+ (some_table.c.group, '='),
+ where=some_table.c.group != 'some group',
+ name='some_table_excl_const'
+ )
+ )
+
+ :param \*elements:
+ A sequence of two tuples of the form ``(column, operator)`` where
+ "column" is a SQL expression element or a raw SQL string, most
+ typically a :class:`.Column` object,
+ and "operator" is a string containing the operator to use.
+
+ .. note::
+
+ A plain string passed for the value of "column" is interpreted
+ as an arbitrary SQL expression; when passing a plain string,
+ any necessary quoting and escaping syntaxes must be applied
+ manually. In order to specify a column name when a
+ :class:`.Column` object is not available, while ensuring that
+ any necessary quoting rules take effect, an ad-hoc
+ :class:`.Column` or :func:`.sql.expression.column` object may
+ be used.
+
+ :param name:
+ Optional, the in-database name of this constraint.
+
+ :param deferrable:
+ Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
+ issuing DDL for this constraint.
+
+ :param initially:
+ Optional string. If set, emit INITIALLY when issuing DDL
+ for this constraint.
+
+ :param using:
+ Optional string. If set, emit USING when issuing DDL
+ for this constraint. Defaults to 'gist'.
+
+ :param where:
+ Optional SQL expression construct or literal SQL string.
+ If set, emit WHERE when issuing DDL
+ for this constraint.
+
+ .. note::
+
+ A plain string passed here is interpreted as an arbitrary SQL
+ expression; when passing a plain string, any necessary quoting
+ and escaping syntaxes must be applied manually.
+
+ """
+ columns = []
+ render_exprs = []
+ self.operators = {}
+
+ expressions, operators = zip(*elements)
+
+ for (expr, column, strname, add_element), operator in zip(
+ self._extract_col_expression_collection(expressions),
+ operators
+ ):
+ if add_element is not None:
+ columns.append(add_element)
+
+ name = column.name if column is not None else strname
+
+ if name is not None:
+ # backwards compat
+ self.operators[name] = operator
+
+ expr = expression._literal_as_text(expr)
+
+ render_exprs.append(
+ (expr, name, operator)
+ )
+
+ self._render_exprs = render_exprs
+ ColumnCollectionConstraint.__init__(
+ self,
+ *columns,
+ name=kw.get('name'),
+ deferrable=kw.get('deferrable'),
+ initially=kw.get('initially')
+ )
+ self.using = kw.get('using', 'gist')
+ where = kw.get('where')
+ if where is not None:
+ self.where = expression._literal_as_text(where)
+
+ def copy(self, **kw):
+ elements = [(col, self.operators[col])
+ for col in self.columns.keys()]
+ c = self.__class__(*elements,
+ name=self.name,
+ deferrable=self.deferrable,
+ initially=self.initially,
+ where=self.where,
+ using=self.using)
+ c.dispatch._update(self.dispatch)
+ return c
+
+
+def array_agg(*arg, **kw):
+ """PostgreSQL-specific form of :class:`.array_agg`, ensures
+ return type is :class:`.postgresql.ARRAY` and not
+ the plain :class:`.types.ARRAY`.
+
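+    For example (``table`` is illustrative)::
+
+        from sqlalchemy.dialects.postgresql import array_agg
+
+        stmt = select([array_agg(table.c.a)])
+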
+ .. versionadded:: 1.1
+
+ """
+ kw['type_'] = ARRAY(functions._type_from_args(arg))
+ return functions.func.array_agg(*arg, **kw)
diff --git a/app/lib/sqlalchemy/dialects/postgresql/hstore.py b/app/lib/sqlalchemy/dialects/postgresql/hstore.py
new file mode 100644
index 0000000..952c6ed
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -0,0 +1,420 @@
+# postgresql/hstore.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import re
+
+from .base import ischema_names
+from .array import ARRAY
+from ... import types as sqltypes
+from ...sql import functions as sqlfunc
+from ...sql import operators
+from ... import util
+
+__all__ = ('HSTORE', 'hstore')
+
+idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
+
+GETITEM = operators.custom_op(
+ "->", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+HAS_KEY = operators.custom_op(
+ "?", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+HAS_ALL = operators.custom_op(
+ "?&", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+HAS_ANY = operators.custom_op(
+ "?|", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+CONTAINS = operators.custom_op(
+ "@>", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+CONTAINED_BY = operators.custom_op(
+ "<@", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+
+class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
+ """Represent the PostgreSQL HSTORE type.
+
+ The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
+
+ data_table = Table('data_table', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', HSTORE)
+ )
+
+ with engine.connect() as conn:
+ conn.execute(
+ data_table.insert(),
+ data = {"key1": "value1", "key2": "value2"}
+ )
+
+ :class:`.HSTORE` provides for a wide range of operations, including:
+
+ * Index operations::
+
+ data_table.c.data['some key'] == 'some value'
+
+ * Containment operations::
+
+ data_table.c.data.has_key('some key')
+
+ data_table.c.data.has_all(['one', 'two', 'three'])
+
+ * Concatenation::
+
+ data_table.c.data + {"k1": "v1"}
+
+ For a full list of special methods see
+ :class:`.HSTORE.comparator_factory`.
+
+    For usage with the SQLAlchemy ORM, it may be desirable to combine
+    the usage of :class:`.HSTORE` with the :class:`.MutableDict` dictionary
+    type, part of the :mod:`sqlalchemy.ext.mutable`
+ extension. This extension will allow "in-place" changes to the
+ dictionary, e.g. addition of new keys or replacement/removal of existing
+ keys to/from the current dictionary, to produce events which will be
+ detected by the unit of work::
+
+ from sqlalchemy.ext.mutable import MutableDict
+
+ class MyClass(Base):
+ __tablename__ = 'data_table'
+
+ id = Column(Integer, primary_key=True)
+ data = Column(MutableDict.as_mutable(HSTORE))
+
+ my_object = session.query(MyClass).one()
+
+ # in-place mutation, requires Mutable extension
+ # in order for the ORM to detect
+ my_object.data['some_key'] = 'some value'
+
+ session.commit()
+
+ When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
+ will not be alerted to any changes to the contents of an existing
+ dictionary, unless that dictionary value is re-assigned to the
+ HSTORE-attribute itself, thus generating a change event.
+
+ .. versionadded:: 0.8
+
+ .. seealso::
+
+ :class:`.hstore` - render the PostgreSQL ``hstore()`` function.
+
+
+ """
+
+ __visit_name__ = 'HSTORE'
+ hashable = False
+ text_type = sqltypes.Text()
+
+ def __init__(self, text_type=None):
+ """Construct a new :class:`.HSTORE`.
+
+ :param text_type: the type that should be used for indexed values.
+ Defaults to :class:`.types.Text`.
+
+ .. versionadded:: 1.1.0
+
+ """
+ if text_type is not None:
+ self.text_type = text_type
+
+ class Comparator(
+ sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator):
+ """Define comparison operations for :class:`.HSTORE`."""
+
+ def has_key(self, other):
+ """Boolean expression. Test for presence of a key. Note that the
+ key may be a SQLA expression.
+ """
+ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
+
+ def has_all(self, other):
+ """Boolean expression. Test for presence of all keys in jsonb
+ """
+ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
+
+ def has_any(self, other):
+ """Boolean expression. Test for presence of any key in jsonb
+ """
+ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
+
+ def contains(self, other, **kwargs):
+ """Boolean expression. Test if keys (or array) are a superset
+ of/contained the keys of the argument jsonb expression.
+ """
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+ def contained_by(self, other):
+ """Boolean expression. Test if keys are a proper subset of the
+            keys of the argument hstore expression.
+ """
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
+
+ def _setup_getitem(self, index):
+ return GETITEM, index, self.type.text_type
+
+ def defined(self, key):
+ """Boolean expression. Test for presence of a non-NULL value for
+ the key. Note that the key may be a SQLA expression.
+ """
+ return _HStoreDefinedFunction(self.expr, key)
+
+ def delete(self, key):
+ """HStore expression. Returns the contents of this hstore with the
+ given key deleted. Note that the key may be a SQLA expression.
+ """
+ if isinstance(key, dict):
+ key = _serialize_hstore(key)
+ return _HStoreDeleteFunction(self.expr, key)
+
+ def slice(self, array):
+ """HStore expression. Returns a subset of an hstore defined by
+ array of keys.
+ """
+ return _HStoreSliceFunction(self.expr, array)
+
+ def keys(self):
+ """Text array expression. Returns array of keys."""
+ return _HStoreKeysFunction(self.expr)
+
+ def vals(self):
+ """Text array expression. Returns array of values."""
+ return _HStoreValsFunction(self.expr)
+
+ def array(self):
+ """Text array expression. Returns array of alternating keys and
+ values.
+ """
+ return _HStoreArrayFunction(self.expr)
+
+ def matrix(self):
+ """Text array expression. Returns array of [key, value] pairs."""
+ return _HStoreMatrixFunction(self.expr)
+
+ comparator_factory = Comparator
+
+ def bind_processor(self, dialect):
+ if util.py2k:
+ encoding = dialect.encoding
+
+ def process(value):
+ if isinstance(value, dict):
+ return _serialize_hstore(value).encode(encoding)
+ else:
+ return value
+ else:
+ def process(value):
+ if isinstance(value, dict):
+ return _serialize_hstore(value)
+ else:
+ return value
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if util.py2k:
+ encoding = dialect.encoding
+
+ def process(value):
+ if value is not None:
+ return _parse_hstore(value.decode(encoding))
+ else:
+ return value
+ else:
+ def process(value):
+ if value is not None:
+ return _parse_hstore(value)
+ else:
+ return value
+ return process
+
+
+ischema_names['hstore'] = HSTORE
+
+
+class hstore(sqlfunc.GenericFunction):
+ """Construct an hstore value within a SQL expression using the
+ PostgreSQL ``hstore()`` function.
+
+ The :class:`.hstore` function accepts one or two arguments as described
+ in the PostgreSQL documentation.
+
+ E.g.::
+
+ from sqlalchemy.dialects.postgresql import array, hstore
+
+ select([hstore('key1', 'value1')])
+
+ select([
+ hstore(
+ array(['key1', 'key2', 'key3']),
+ array(['value1', 'value2', 'value3'])
+ )
+ ])
+
+ .. versionadded:: 0.8
+
+ .. seealso::
+
+ :class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.
+
+ """
+ type = HSTORE
+ name = 'hstore'
+
+
+class _HStoreDefinedFunction(sqlfunc.GenericFunction):
+ type = sqltypes.Boolean
+ name = 'defined'
+
+
+class _HStoreDeleteFunction(sqlfunc.GenericFunction):
+ type = HSTORE
+ name = 'delete'
+
+
+class _HStoreSliceFunction(sqlfunc.GenericFunction):
+ type = HSTORE
+ name = 'slice'
+
+
+class _HStoreKeysFunction(sqlfunc.GenericFunction):
+ type = ARRAY(sqltypes.Text)
+ name = 'akeys'
+
+
+class _HStoreValsFunction(sqlfunc.GenericFunction):
+ type = ARRAY(sqltypes.Text)
+ name = 'avals'
+
+
+class _HStoreArrayFunction(sqlfunc.GenericFunction):
+ type = ARRAY(sqltypes.Text)
+ name = 'hstore_to_array'
+
+
+class _HStoreMatrixFunction(sqlfunc.GenericFunction):
+ type = ARRAY(sqltypes.Text)
+ name = 'hstore_to_matrix'
+
+
+#
+# parsing. note that none of this is used with the psycopg2 backend,
+# which provides its own native extensions.
+#
+
+# My best guess at the parsing rules of hstore literals, since no formal
+# grammar is given. This is mostly reverse engineered from PG's input parser
+# behavior.
+HSTORE_PAIR_RE = re.compile(r"""
+(
+  "(?P<key> (\\ . | [^"])* )"   # Quoted key
+)
+[ ]* => [ ]*    # Pair operator, optional adjoining whitespace
+(
+    (?P<value_null> NULL )          # NULL value
+  | "(?P<value> (\\ . | [^"])* )"   # Quoted value
+)
+""", re.VERBOSE)
+
+HSTORE_DELIMITER_RE = re.compile(r"""
+[ ]* , [ ]*
+""", re.VERBOSE)
+
+
+def _parse_error(hstore_str, pos):
+ """format an unmarshalling error."""
+
+ ctx = 20
+ hslen = len(hstore_str)
+
+ parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
+ residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
+
+ if len(parsed_tail) > ctx:
+ parsed_tail = '[...]' + parsed_tail[1:]
+ if len(residual) > ctx:
+ residual = residual[:-1] + '[...]'
+
+ return "After %r, could not parse residual at position %d: %r" % (
+ parsed_tail, pos, residual)
+
+
+def _parse_hstore(hstore_str):
+ """Parse an hstore from its literal string representation.
+
+    Attempts to approximate PG's hstore input parsing rules as closely as
+    possible. Although currently this is not strictly necessary, since the
+    current implementation of hstore's output syntax is stricter than what
+    it accepts as input, the documentation makes no guarantee that this
+    will always be the case.
+
+    """
+ result = {}
+ pos = 0
+ pair_match = HSTORE_PAIR_RE.match(hstore_str)
+
+ while pair_match is not None:
+ key = pair_match.group('key').replace(r'\"', '"').replace(
+ "\\\\", "\\")
+ if pair_match.group('value_null'):
+ value = None
+ else:
+ value = pair_match.group('value').replace(
+ r'\"', '"').replace("\\\\", "\\")
+ result[key] = value
+
+ pos += pair_match.end()
+
+ delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
+ if delim_match is not None:
+ pos += delim_match.end()
+
+ pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
+
+ if pos != len(hstore_str):
+ raise ValueError(_parse_error(hstore_str, pos))
+
+ return result
+
+
+def _serialize_hstore(val):
+ """Serialize a dictionary into an hstore literal. Keys and values must
+ both be strings (except None for values).
+
+ """
+ def esc(s, position):
+ if position == 'value' and s is None:
+ return 'NULL'
+ elif isinstance(s, util.string_types):
+ return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
+ else:
+ raise ValueError("%r in %s position is not a string." %
+ (s, position))
+
+ return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
+ for k, v in val.items())
+
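+# Round-trip sketch of the helpers above (dict ordering may vary):
+#
+#     _serialize_hstore({'key1': 'value1', 'key2': None})
+#     # -> '"key1"=>"value1", "key2"=>NULL'
+#
+#     _parse_hstore('"key1"=>"value1", "key2"=>NULL')
+#     # -> {'key1': 'value1', 'key2': None}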
+
diff --git a/app/lib/sqlalchemy/dialects/postgresql/json.py b/app/lib/sqlalchemy/dialects/postgresql/json.py
new file mode 100644
index 0000000..f145806
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/json.py
@@ -0,0 +1,301 @@
+# postgresql/json.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+from __future__ import absolute_import
+
+import json
+import collections
+
+from .base import ischema_names, colspecs
+from ... import types as sqltypes
+from ...sql import operators
+from ...sql import elements
+from ... import util
+
+__all__ = ('JSON', 'JSONB')
+
+idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
+
+ASTEXT = operators.custom_op(
+ "->>", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+JSONPATH_ASTEXT = operators.custom_op(
+ "#>>", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+
+HAS_KEY = operators.custom_op(
+ "?", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+HAS_ALL = operators.custom_op(
+ "?&", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+HAS_ANY = operators.custom_op(
+ "?|", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+CONTAINS = operators.custom_op(
+ "@>", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+CONTAINED_BY = operators.custom_op(
+ "<@", precedence=idx_precedence, natural_self_precedent=True,
+ eager_grouping=True
+)
+
+
+class JSONPathType(sqltypes.JSON.JSONPathType):
+ def bind_processor(self, dialect):
+ super_proc = self.string_bind_processor(dialect)
+
+ def process(value):
+ assert isinstance(value, collections.Sequence)
+            tokens = [util.text_type(elem) for elem in value]
+ value = "{%s}" % (", ".join(tokens))
+ if super_proc:
+ value = super_proc(value)
+ return value
+
+ return process
+
+ def literal_processor(self, dialect):
+ super_proc = self.string_literal_processor(dialect)
+
+ def process(value):
+ assert isinstance(value, collections.Sequence)
+            tokens = [util.text_type(elem) for elem in value]
+ value = "{%s}" % (", ".join(tokens))
+ if super_proc:
+ value = super_proc(value)
+ return value
+
+ return process
+
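+# For example (illustrative), a JSON path index value of ('key1', 'key2', 5)
+# is rendered by the processors above as the PostgreSQL path literal
+# "{key1, key2, 5}".
+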
+colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
+
+
+class JSON(sqltypes.JSON):
+ """Represent the PostgreSQL JSON type.
+
+ This type is a specialization of the Core-level :class:`.types.JSON`
+ type. Be sure to read the documentation for :class:`.types.JSON` for
+ important tips regarding treatment of NULL values and ORM use.
+
+ .. versionchanged:: 1.1 :class:`.postgresql.JSON` is now a PostgreSQL-
+ specific specialization of the new :class:`.types.JSON` type.
+
+ The operators provided by the PostgreSQL version of :class:`.JSON`
+ include:
+
+ * Index operations (the ``->`` operator)::
+
+ data_table.c.data['some key']
+
+ data_table.c.data[5]
+
+
+ * Index operations returning text (the ``->>`` operator)::
+
+ data_table.c.data['some key'].astext == 'some value'
+
+ * Index operations with CAST
+      (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
+
+ data_table.c.data['some key'].astext.cast(Integer) == 5
+
+ * Path index operations (the ``#>`` operator)::
+
+ data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
+
+ * Path index operations returning text (the ``#>>`` operator)::
+
+ data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == \
+'some value'
+
+ .. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
+ JSON objects now requires that the :attr:`.JSON.Comparator.astext`
+ modifier be called explicitly, if the cast works only from a textual
+ string.
+
+    Index operations return an expression object whose type defaults to
+    :class:`.JSON`, so that further JSON-oriented instructions may be
+    called upon the result type.
+
+ Custom serializers and deserializers are specified at the dialect level,
+    that is, using :func:`.create_engine`.  The reason for this is that when
+ using psycopg2, the DBAPI only allows serializers at the per-cursor
+ or per-connection level. E.g.::
+
+ engine = create_engine("postgresql://scott:tiger@localhost/test",
+ json_serializer=my_serialize_fn,
+ json_deserializer=my_deserialize_fn
+ )
+
+ When using the psycopg2 dialect, the json_deserializer is registered
+ against the database using ``psycopg2.extras.register_default_json``.
+
+ .. seealso::
+
+ :class:`.types.JSON` - Core level JSON type
+
+ :class:`.JSONB`
+
+ """
+
+ astext_type = sqltypes.Text()
+
+ def __init__(self, none_as_null=False, astext_type=None):
+ """Construct a :class:`.JSON` type.
+
+ :param none_as_null: if True, persist the value ``None`` as a
+ SQL NULL value, not the JSON encoding of ``null``. Note that
+ when this flag is False, the :func:`.null` construct can still
+ be used to persist a NULL value::
+
+ from sqlalchemy import null
+ conn.execute(table.insert(), data=null())
+
+ .. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null`
+ is now supported in order to persist a NULL value.
+
+ .. seealso::
+
+ :attr:`.JSON.NULL`
+
+ :param astext_type: the type to use for the
+ :attr:`.JSON.Comparator.astext`
+ accessor on indexed attributes. Defaults to :class:`.types.Text`.
+
+ .. versionadded:: 1.1
+
+ """
+ super(JSON, self).__init__(none_as_null=none_as_null)
+ if astext_type is not None:
+ self.astext_type = astext_type
+
+ class Comparator(sqltypes.JSON.Comparator):
+ """Define comparison operations for :class:`.JSON`."""
+
+ @property
+ def astext(self):
+ """On an indexed expression, use the "astext" (e.g. "->>")
+ conversion when rendered in SQL.
+
+ E.g.::
+
+ select([data_table.c.data['some key'].astext])
+
+ .. seealso::
+
+ :meth:`.ColumnElement.cast`
+
+ """
+
+ if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
+ return self.expr.left.operate(
+ JSONPATH_ASTEXT,
+ self.expr.right, result_type=self.type.astext_type)
+ else:
+ return self.expr.left.operate(
+ ASTEXT, self.expr.right, result_type=self.type.astext_type)
+
+ comparator_factory = Comparator
+
+
+colspecs[sqltypes.JSON] = JSON
+ischema_names['json'] = JSON
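+
+# A minimal usage sketch of the index/``astext`` operators (hypothetical
+# table and column names):
+#
+#     from sqlalchemy import Table, Column, Integer, MetaData, select
+#     from sqlalchemy.dialects.postgresql import JSON
+#
+#     data_table = Table('data_table', MetaData(),
+#                        Column('id', Integer, primary_key=True),
+#                        Column('data', JSON))
+#
+#     # renders: data_table.data ->> 'some key'
+#     stmt = select([data_table.c.data['some key'].astext])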
+
+
+class JSONB(JSON):
+ """Represent the PostgreSQL JSONB type.
+
+ The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.::
+
+ data_table = Table('data_table', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', JSONB)
+ )
+
+ with engine.connect() as conn:
+ conn.execute(
+ data_table.insert(),
+ data = {"key1": "value1", "key2": "value2"}
+ )
+
+ The :class:`.JSONB` type includes all operations provided by
+ :class:`.JSON`, including the same behaviors for indexing operations.
+ It also adds additional operators specific to JSONB, including
+ :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
+ :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
+ and :meth:`.JSONB.Comparator.contained_by`.
+
+ Like the :class:`.JSON` type, the :class:`.JSONB` type does not detect
+ in-place changes when used with the ORM, unless the
+ :mod:`sqlalchemy.ext.mutable` extension is used.
+
+ Custom serializers and deserializers
+ are shared with the :class:`.JSON` class, using the ``json_serializer``
+ and ``json_deserializer`` keyword arguments. These must be specified
+ at the dialect level using :func:`.create_engine`. When using
+ psycopg2, the serializers are associated with the jsonb type using
+ ``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
+ in the same way that ``psycopg2.extras.register_default_json`` is used
+ to register these handlers with the json type.
+
+ .. versionadded:: 0.9.7
+
+ .. seealso::
+
+ :class:`.JSON`
+
+ """
+
+ __visit_name__ = 'JSONB'
+
+ class Comparator(JSON.Comparator):
+        """Define comparison operations for :class:`.JSONB`."""
+
+ def has_key(self, other):
+ """Boolean expression. Test for presence of a key. Note that the
+ key may be a SQLA expression.
+ """
+ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
+
+ def has_all(self, other):
+            """Boolean expression.  Test for presence of all keys in jsonb.
+            """
+ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
+
+ def has_any(self, other):
+            """Boolean expression.  Test for presence of any key in jsonb.
+            """
+ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
+
+ def contains(self, other, **kwargs):
+            """Boolean expression.  Test if keys (or array) are a superset
+            of / contain the keys of the argument jsonb expression.
+ """
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+ def contained_by(self, other):
+ """Boolean expression. Test if keys are a proper subset of the
+ keys of the argument jsonb expression.
+ """
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
+
+ comparator_factory = Comparator
+
+ischema_names['jsonb'] = JSONB
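+
+# A minimal usage sketch of the JSONB containment operators (reusing the
+# hypothetical ``data_table`` from the :class:`.JSONB` docstring above):
+#
+#     from sqlalchemy import select
+#
+#     # renders roughly: data_table.data @> %(data_1)s
+#     stmt = select([data_table.c.id]).where(
+#         data_table.c.data.contains({"key1": "value1"}))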
diff --git a/app/lib/sqlalchemy/dialects/postgresql/pg8000.py b/app/lib/sqlalchemy/dialects/postgresql/pg8000.py
new file mode 100644
index 0000000..8c019a2
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -0,0 +1,265 @@
+# postgresql/pg8000.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: postgresql+pg8000
+ :name: pg8000
+ :dbapi: pg8000
+ :connectstring: \
+postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
+ :url: https://pythonhosted.org/pg8000/
+
+
+.. _pg8000_unicode:
+
+Unicode
+-------
+
+pg8000 will encode / decode string values between it and the server using the
+PostgreSQL ``client_encoding`` parameter; by default this is the value in
+the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
+Typically, this can be changed to ``utf-8``, as a more useful default::
+
+    #client_encoding = sql_ascii # actually, defaults to database
+                                 # encoding
+    client_encoding = utf8
+
+The ``client_encoding`` can be overridden for a session by executing the
+SQL::
+
+    SET CLIENT_ENCODING TO 'utf8';
+
+SQLAlchemy will execute this SQL on all new connections based on the value
+passed to :func:`.create_engine` using the ``client_encoding`` parameter::
+
+ engine = create_engine(
+ "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
+
+
+.. _pg8000_isolation_level:
+
+pg8000 Transaction Isolation Level
+-------------------------------------
+
+The pg8000 dialect offers the same isolation level settings as that
+of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT``
+
+.. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using
+ pg8000.
+
+.. seealso::
+
+ :ref:`postgresql_isolation_level`
+
+ :ref:`psycopg2_isolation_level`
+
+
+"""
+from ... import util, exc
+import decimal
+from ... import processors
+from ... import types as sqltypes
+from .base import (
+ PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext,
+ _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES)
+import re
+from sqlalchemy.dialects.postgresql.json import JSON
+
+
+class _PGNumeric(sqltypes.Numeric):
+ def result_processor(self, dialect, coltype):
+ if self.asdecimal:
+ if coltype in _FLOAT_TYPES:
+ return processors.to_decimal_processor_factory(
+ decimal.Decimal, self._effective_decimal_return_scale)
+ elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+ # pg8000 returns Decimal natively for 1700
+ return None
+ else:
+ raise exc.InvalidRequestError(
+ "Unknown PG numeric type: %d" % coltype)
+ else:
+ if coltype in _FLOAT_TYPES:
+ # pg8000 returns float natively for 701
+ return None
+ elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+ return processors.to_float
+ else:
+ raise exc.InvalidRequestError(
+ "Unknown PG numeric type: %d" % coltype)
+
+
+class _PGNumericNoBind(_PGNumeric):
+ def bind_processor(self, dialect):
+ return None
+
+
+class _PGJSON(JSON):
+
+ def result_processor(self, dialect, coltype):
+ if dialect._dbapi_version > (1, 10, 1):
+ return None # Has native JSON
+ else:
+ return super(_PGJSON, self).result_processor(dialect, coltype)
+
+
+class PGExecutionContext_pg8000(PGExecutionContext):
+ pass
+
+
+class PGCompiler_pg8000(PGCompiler):
+ def visit_mod_binary(self, binary, operator, **kw):
+ return self.process(binary.left, **kw) + " %% " + \
+ self.process(binary.right, **kw)
+
+ def post_process_text(self, text):
+ if '%%' in text:
+ util.warn("The SQLAlchemy postgresql dialect "
+ "now automatically escapes '%' in text() "
+ "expressions to '%%'.")
+ return text.replace('%', '%%')
+
+
+class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
+ def _escape_identifier(self, value):
+ value = value.replace(self.escape_quote, self.escape_to_quote)
+ return value.replace('%', '%%')
+
+
+class PGDialect_pg8000(PGDialect):
+ driver = 'pg8000'
+
+ supports_unicode_statements = True
+
+ supports_unicode_binds = True
+
+ default_paramstyle = 'format'
+ supports_sane_multi_rowcount = True
+ execution_ctx_cls = PGExecutionContext_pg8000
+ statement_compiler = PGCompiler_pg8000
+ preparer = PGIdentifierPreparer_pg8000
+ description_encoding = 'use_encoding'
+
+ colspecs = util.update_copy(
+ PGDialect.colspecs,
+ {
+ sqltypes.Numeric: _PGNumericNoBind,
+ sqltypes.Float: _PGNumeric,
+ JSON: _PGJSON,
+ sqltypes.JSON: _PGJSON
+ }
+ )
+
+ def __init__(self, client_encoding=None, **kwargs):
+ PGDialect.__init__(self, **kwargs)
+ self.client_encoding = client_encoding
+
+ def initialize(self, connection):
+ self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14)
+ super(PGDialect_pg8000, self).initialize(connection)
+
+ @util.memoized_property
+ def _dbapi_version(self):
+ if self.dbapi and hasattr(self.dbapi, '__version__'):
+ return tuple(
+ [
+ int(x) for x in re.findall(
+ r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)])
+ else:
+ return (99, 99, 99)
+
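+    # e.g. (illustrative): a pg8000 ``__version__`` of '1.10.6' parses to
+    # (1, 10, 6); (99, 99, 99) is an "assume newest" fallback when no
+    # version information is available.
+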
+ @classmethod
+ def dbapi(cls):
+ return __import__('pg8000')
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ if 'port' in opts:
+ opts['port'] = int(opts['port'])
+ opts.update(url.query)
+ return ([], opts)
+
+ def is_disconnect(self, e, connection, cursor):
+ return "connection is closed" in str(e)
+
+ def set_isolation_level(self, connection, level):
+ level = level.replace('_', ' ')
+
+ # adjust for ConnectionFairy possibly being present
+ if hasattr(connection, 'connection'):
+ connection = connection.connection
+
+ if level == 'AUTOCOMMIT':
+ connection.autocommit = True
+ elif level in self._isolation_lookup:
+ connection.autocommit = False
+ cursor = connection.cursor()
+ cursor.execute(
+ "SET SESSION CHARACTERISTICS AS TRANSACTION "
+ "ISOLATION LEVEL %s" % level)
+ cursor.execute("COMMIT")
+ cursor.close()
+ else:
+ raise exc.ArgumentError(
+ "Invalid value '%s' for isolation_level. "
+ "Valid isolation levels for %s are %s or AUTOCOMMIT" %
+ (level, self.name, ", ".join(self._isolation_lookup))
+ )
+
+ def set_client_encoding(self, connection, client_encoding):
+ # adjust for ConnectionFairy possibly being present
+ if hasattr(connection, 'connection'):
+ connection = connection.connection
+
+ cursor = connection.cursor()
+ cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'")
+ cursor.execute("COMMIT")
+ cursor.close()
+
+ def do_begin_twophase(self, connection, xid):
+ connection.connection.tpc_begin((0, xid, ''))
+
+ def do_prepare_twophase(self, connection, xid):
+ connection.connection.tpc_prepare()
+
+ def do_rollback_twophase(
+ self, connection, xid, is_prepared=True, recover=False):
+ connection.connection.tpc_rollback((0, xid, ''))
+
+ def do_commit_twophase(
+ self, connection, xid, is_prepared=True, recover=False):
+ connection.connection.tpc_commit((0, xid, ''))
+
+ def do_recover_twophase(self, connection):
+ return [row[1] for row in connection.connection.tpc_recover()]
+
+ def on_connect(self):
+ fns = []
+ if self.client_encoding is not None:
+ def on_connect(conn):
+ self.set_client_encoding(conn, self.client_encoding)
+ fns.append(on_connect)
+
+ if self.isolation_level is not None:
+ def on_connect(conn):
+ self.set_isolation_level(conn, self.isolation_level)
+ fns.append(on_connect)
+
+ if len(fns) > 0:
+ def on_connect(conn):
+ for fn in fns:
+ fn(conn)
+ return on_connect
+ else:
+ return None
+
+dialect = PGDialect_pg8000
diff --git a/app/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/app/lib/sqlalchemy/dialects/postgresql/psycopg2.py
new file mode 100644
index 0000000..5032814
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -0,0 +1,702 @@
+# postgresql/psycopg2.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: postgresql+psycopg2
+ :name: psycopg2
+ :dbapi: psycopg2
+ :connectstring: postgresql+psycopg2://user:password@host:port/dbname\
+[?key=value&key=value...]
+ :url: http://pypi.python.org/pypi/psycopg2/
+
+psycopg2 Connect Arguments
+-----------------------------------
+
+psycopg2-specific keyword arguments which are accepted by
+:func:`.create_engine()` are:
+
+* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
+ statements which support this feature. What this essentially means from a
+ psycopg2 point of view is that the cursor is created using a name, e.g.
+ ``connection.cursor('some name')``, which has the effect that result rows
+ are not immediately pre-fetched and buffered after statement execution, but
+ are instead left on the server and only retrieved as needed. SQLAlchemy's
+ :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
+ behavior when this feature is enabled, such that groups of 100 rows at a
+ time are fetched over the wire to reduce conversational overhead.
+ Note that the :paramref:`.Connection.execution_options.stream_results`
+ execution option is a more targeted
+ way of enabling this mode on a per-execution basis.
+* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
+ per connection. True by default.
+
+ .. seealso::
+
+ :ref:`psycopg2_disable_native_unicode`
+
+* ``isolation_level``: This option, available for all PostgreSQL dialects,
+ includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
+ dialect.
+
+ .. seealso::
+
+ :ref:`psycopg2_isolation_level`
+
+* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
+ using psycopg2's ``set_client_encoding()`` method.
+
+ .. seealso::
+
+ :ref:`psycopg2_unicode`
+
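+For example, several of the above arguments combined (connection values
+are illustrative)::
+
+    engine = create_engine(
+        "postgresql+psycopg2://scott:tiger@localhost/test",
+        server_side_cursors=True,
+        use_native_unicode=True,
+        client_encoding='utf8')
+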
+Unix Domain Connections
+------------------------
+
+psycopg2 supports connecting via Unix domain connections. When the ``host``
+portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
+which specifies Unix-domain communication rather than TCP/IP communication::
+
+ create_engine("postgresql+psycopg2://user:password@/dbname")
+
+By default, the socket file used to connect is the Unix-domain socket
+in ``/tmp``, or whatever socket directory was specified when PostgreSQL
+was built.  This value can be overridden by passing a pathname to psycopg2,
+using ``host`` as an additional keyword argument::
+
+ create_engine("postgresql+psycopg2://user:password@/dbname?\
+host=/var/lib/postgresql")
+
+See also:
+
+`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
+
+.. _psycopg2_execution_options:
+
+Per-Statement/Connection Execution Options
+-------------------------------------------
+
+The following DBAPI-specific options are respected when used with
+:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
+:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
+
+* ``isolation_level`` - Set the transaction isolation level for the lifespan of a
+ :class:`.Connection` (can only be set on a connection, not a statement
+ or query). See :ref:`psycopg2_isolation_level`.
+
+* ``stream_results`` - Enable or disable usage of psycopg2 server side cursors -
+ this feature makes use of "named" cursors in combination with special
+ result handling methods so that result rows are not fully buffered.
+ If ``None`` or not set, the ``server_side_cursors`` option of the
+ :class:`.Engine` is used.
+
+* ``max_row_buffer`` - when using ``stream_results``, an integer value that
+ specifies the maximum number of rows to buffer at a time. This is
+ interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
+ buffer will grow to ultimately store 1000 rows at a time.
+
+ .. versionadded:: 1.0.6
+
+.. _psycopg2_unicode:
+
+Unicode with Psycopg2
+----------------------
+
+By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
+extension, such that the DBAPI receives and returns all strings as Python
+Unicode objects directly - SQLAlchemy passes these values through without
+change. Psycopg2 here will encode/decode string values based on the
+current "client encoding" setting; by default this is the value in
+the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
+Typically, this can be changed to ``utf8``, as a more useful default::
+
+ # postgresql.conf file
+
+    # client_encoding = sql_ascii # actually, defaults to database
+                                  # encoding
+    client_encoding = utf8
+
+A second way to affect the client encoding is to set it within Psycopg2
+locally. SQLAlchemy will call psycopg2's
+:meth:`psycopg2:connection.set_client_encoding` method
+on all new connections based on the value passed to
+:func:`.create_engine` using the ``client_encoding`` parameter::
+
+ # set_client_encoding() setting;
+ # works for *all* PostgreSQL versions
+ engine = create_engine("postgresql://user:pass@host/dbname",
+ client_encoding='utf8')
+
+This overrides the encoding specified in the PostgreSQL client configuration.
+When using the parameter in this way, the psycopg2 driver emits
+``SET client_encoding TO 'utf8'`` on the connection explicitly, and works
+in all PostgreSQL versions.
+
+Note that the ``client_encoding`` setting as passed to :func:`.create_engine`
+is **not the same** as the more recently added ``client_encoding`` parameter
+now supported by libpq directly. This is enabled when ``client_encoding``
+is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed
+using the :paramref:`.create_engine.connect_args` parameter::
+
+ # libpq direct parameter setting;
+ # only works for PostgreSQL **9.1 and above**
+ engine = create_engine("postgresql://user:pass@host/dbname",
+ connect_args={'client_encoding': 'utf8'})
+
+ # using the query string is equivalent
+ engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8")
+
+The above parameter was only added to libpq as of version 9.1 of PostgreSQL,
+so using the previous method is better for cross-version support.
+
+.. _psycopg2_disable_native_unicode:
+
+Disabling Native Unicode
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+SQLAlchemy can also be instructed to skip the usage of the psycopg2
+``UNICODE`` extension and to instead utilize its own unicode encode/decode
+services, which are normally reserved only for those DBAPIs that don't
+fully support unicode directly. Passing ``use_native_unicode=False`` to
+:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
+SQLAlchemy will instead encode data itself into Python bytestrings on the way
+in and coerce from bytes on the way back,
+using the value of the :func:`.create_engine` ``encoding`` parameter, which
+defaults to ``utf-8``.
+SQLAlchemy's own unicode encode/decode functionality is steadily becoming
+obsolete as most DBAPIs now support unicode fully.
+
+Bound Parameter Styles
+----------------------
+
+The default parameter style for the psycopg2 dialect is "pyformat", where
+SQL is rendered using ``%(paramname)s`` style. This format has the limitation
+that it does not accommodate the unusual case of parameter names that
+actually contain percent or parenthesis symbols; as SQLAlchemy in many cases
+generates bound parameter names based on the name of a column, the presence
+of these characters in a column name can lead to problems.
+
+There are two solutions to the issue of a :class:`.schema.Column` that contains
+one of these characters in its name. One is to specify the
+:paramref:`.schema.Column.key` for columns that have such names::
+
+ measurement = Table('measurement', metadata,
+ Column('Size (meters)', Integer, key='size_meters')
+ )
+
+Above, an INSERT statement such as ``measurement.insert()`` will use
+``size_meters`` as the parameter name, and a SQL expression such as
+``measurement.c.size_meters > 10`` will derive the bound parameter name
+from the ``size_meters`` key as well.
+
+.. versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key`
+ as the source of naming when anonymous bound parameters are created
+ in SQL expressions; previously, this behavior only applied to
+ :meth:`.Table.insert` and :meth:`.Table.update` parameter names.
+
+The other solution is to use a positional format; psycopg2 allows use of the
+"format" paramstyle, which can be passed to
+:paramref:`.create_engine.paramstyle`::
+
+ engine = create_engine(
+ 'postgresql://scott:tiger@localhost:5432/test', paramstyle='format')
+
+With the above engine, instead of a statement like::
+
+ INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s)
+ {'Size (meters)': 1}
+
+we instead see::
+
+ INSERT INTO measurement ("Size (meters)") VALUES (%s)
+ (1, )
+
+Above, the dictionary-style parameter set has been converted into a
+positional tuple.
+
+
+Transactions
+------------
+
+The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
+
+.. _psycopg2_isolation_level:
+
+Psycopg2 Transaction Isolation Level
+-------------------------------------
+
+As discussed in :ref:`postgresql_isolation_level`,
+all PostgreSQL dialects support setting of transaction isolation level
+both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
+as well as the ``isolation_level`` argument used by
+:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these
+options make use of psycopg2's ``set_isolation_level()`` connection method,
+rather than emitting a PostgreSQL directive; this is because psycopg2's
+API-level setting is always emitted at the start of each transaction in any
+case.
+
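+For example (connection values are illustrative)::
+
+    engine = create_engine(
+        "postgresql+psycopg2://scott:tiger@localhost/test",
+        isolation_level='REPEATABLE READ')
+
+    with engine.connect() as conn:
+        autocommit_conn = conn.execution_options(
+            isolation_level='AUTOCOMMIT')
+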
+The psycopg2 dialect supports these constants for isolation level:
+
+* ``READ COMMITTED``
+* ``READ UNCOMMITTED``
+* ``REPEATABLE READ``
+* ``SERIALIZABLE``
+* ``AUTOCOMMIT``
+
+.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using
+ psycopg2.
+
+.. seealso::
+
+ :ref:`postgresql_isolation_level`
+
+ :ref:`pg8000_isolation_level`
+
+
+NOTICE logging
+---------------
+
+The psycopg2 dialect will log PostgreSQL NOTICE messages via the
+``sqlalchemy.dialects.postgresql`` logger::
+
+ import logging
+ logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
+
+.. _psycopg2_hstore:
+
+HSTORE type
+------------
+
+The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of
+the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
+by default when psycopg2 version 2.4 or greater is used, and
+it is detected that the target database has the HSTORE type set up for use.
+In other words, when the dialect makes the first
+connection, a sequence like the following is performed:
+
+1. Request the available HSTORE oids using
+ ``psycopg2.extras.HstoreAdapter.get_oids()``.
+ If this function returns a list of HSTORE identifiers, we then determine
+ that the ``HSTORE`` extension is present.
+ This function is **skipped** if the version of psycopg2 installed is
+ less than version 2.4.
+
+2. If the ``use_native_hstore`` flag is at its default of ``True``, and
+ we've detected that ``HSTORE`` oids are available, the
+ ``psycopg2.extensions.register_hstore()`` extension is invoked for all
+ connections.
+
+The ``register_hstore()`` extension has the effect of **all Python
+dictionaries being accepted as parameters regardless of the type of target
+column in SQL**. The dictionaries are converted by this extension into a
+textual HSTORE expression. If this behavior is not desired, disable the
+use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
+follows::
+
+ engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
+ use_native_hstore=False)
+
+The ``HSTORE`` type is **still supported** when the
+``psycopg2.extensions.register_hstore()`` extension is not used. It merely
+means that the coercion between Python dictionaries and the HSTORE
+string format, on both the parameter side and the result side, will take
+place within SQLAlchemy's own marshalling logic, and not that of
+``psycopg2``, which may be more performant.
+
+"""
+from __future__ import absolute_import
+
+import re
+import logging
+
+from ... import util, exc
+import decimal
+from ... import processors
+from ...engine import result as _result
+from ...sql import expression
+from ... import types as sqltypes
+from .base import PGDialect, PGCompiler, \
+ PGIdentifierPreparer, PGExecutionContext, \
+ ENUM, _DECIMAL_TYPES, _FLOAT_TYPES,\
+ _INT_TYPES, UUID
+from .hstore import HSTORE
+from .json import JSON, JSONB
+
+try:
+ from uuid import UUID as _python_UUID
+except ImportError:
+ _python_UUID = None
+
+
+logger = logging.getLogger('sqlalchemy.dialects.postgresql')
+
+
+class _PGNumeric(sqltypes.Numeric):
+ def bind_processor(self, dialect):
+ return None
+
+ def result_processor(self, dialect, coltype):
+ if self.asdecimal:
+ if coltype in _FLOAT_TYPES:
+ return processors.to_decimal_processor_factory(
+ decimal.Decimal,
+ self._effective_decimal_return_scale)
+ elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+ # pg8000 returns Decimal natively for 1700
+ return None
+ else:
+ raise exc.InvalidRequestError(
+ "Unknown PG numeric type: %d" % coltype)
+ else:
+ if coltype in _FLOAT_TYPES:
+ # pg8000 returns float natively for 701
+ return None
+ elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+ return processors.to_float
+ else:
+ raise exc.InvalidRequestError(
+ "Unknown PG numeric type: %d" % coltype)
+
+
+class _PGEnum(ENUM):
+ def result_processor(self, dialect, coltype):
+ if self.native_enum and util.py2k and self.convert_unicode is True:
+ # we can't easily use PG's extensions here because
+ # the OID is on the fly, and we need to give it a python
+ # function anyway - not really worth it.
+ self.convert_unicode = "force_nocheck"
+ return super(_PGEnum, self).result_processor(dialect, coltype)
+
+
+class _PGHStore(HSTORE):
+ def bind_processor(self, dialect):
+ if dialect._has_native_hstore:
+ return None
+ else:
+ return super(_PGHStore, self).bind_processor(dialect)
+
+ def result_processor(self, dialect, coltype):
+ if dialect._has_native_hstore:
+ return None
+ else:
+ return super(_PGHStore, self).result_processor(dialect, coltype)
+
+
+class _PGJSON(JSON):
+
+ def result_processor(self, dialect, coltype):
+ if dialect._has_native_json:
+ return None
+ else:
+ return super(_PGJSON, self).result_processor(dialect, coltype)
+
+
+class _PGJSONB(JSONB):
+
+ def result_processor(self, dialect, coltype):
+ if dialect._has_native_jsonb:
+ return None
+ else:
+ return super(_PGJSONB, self).result_processor(dialect, coltype)
+
+
+class _PGUUID(UUID):
+ def bind_processor(self, dialect):
+ if not self.as_uuid and dialect.use_native_uuid:
+ nonetype = type(None)
+
+ def process(value):
+ if value is not None:
+ value = _python_UUID(value)
+ return value
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if not self.as_uuid and dialect.use_native_uuid:
+ def process(value):
+ if value is not None:
+ value = str(value)
+ return value
+ return process
+
+
+_server_side_id = util.counter()
+
+
+class PGExecutionContext_psycopg2(PGExecutionContext):
+ def create_server_side_cursor(self):
+ # use server-side cursors:
+ # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
+ ident = "c_%s_%s" % (hex(id(self))[2:],
+ hex(_server_side_id())[2:])
+ return self._dbapi_connection.cursor(ident)
+
+ def get_result_proxy(self):
+ # TODO: ouch
+ if logger.isEnabledFor(logging.INFO):
+ self._log_notices(self.cursor)
+
+ if self._is_server_side:
+ return _result.BufferedRowResultProxy(self)
+ else:
+ return _result.ResultProxy(self)
+
+ def _log_notices(self, cursor):
+ for notice in cursor.connection.notices:
+ # NOTICE messages have a
+ # newline character at the end
+ logger.info(notice.rstrip())
+
+ cursor.connection.notices[:] = []
+
+
+class PGCompiler_psycopg2(PGCompiler):
+ def visit_mod_binary(self, binary, operator, **kw):
+ return self.process(binary.left, **kw) + " %% " + \
+ self.process(binary.right, **kw)
+
+ def post_process_text(self, text):
+ return text.replace('%', '%%')
+
+
+class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
+ def _escape_identifier(self, value):
+ value = value.replace(self.escape_quote, self.escape_to_quote)
+ return value.replace('%', '%%')
+
+
+class PGDialect_psycopg2(PGDialect):
+ driver = 'psycopg2'
+ if util.py2k:
+ supports_unicode_statements = False
+
+ supports_server_side_cursors = True
+
+ default_paramstyle = 'pyformat'
+ # set to true based on psycopg2 version
+ supports_sane_multi_rowcount = False
+ execution_ctx_cls = PGExecutionContext_psycopg2
+ statement_compiler = PGCompiler_psycopg2
+ preparer = PGIdentifierPreparer_psycopg2
+ psycopg2_version = (0, 0)
+
+ FEATURE_VERSION_MAP = dict(
+ native_json=(2, 5),
+ native_jsonb=(2, 5, 4),
+ sane_multi_rowcount=(2, 0, 9),
+ array_oid=(2, 4, 3),
+ hstore_adapter=(2, 4)
+ )
+
+ _has_native_hstore = False
+ _has_native_json = False
+ _has_native_jsonb = False
+
+ engine_config_types = PGDialect.engine_config_types.union([
+ ('use_native_unicode', util.asbool),
+ ])
+
+ colspecs = util.update_copy(
+ PGDialect.colspecs,
+ {
+ sqltypes.Numeric: _PGNumeric,
+ ENUM: _PGEnum, # needs force_unicode
+ sqltypes.Enum: _PGEnum, # needs force_unicode
+ HSTORE: _PGHStore,
+ JSON: _PGJSON,
+ sqltypes.JSON: _PGJSON,
+ JSONB: _PGJSONB,
+ UUID: _PGUUID
+ }
+ )
+
+ def __init__(self, server_side_cursors=False, use_native_unicode=True,
+ client_encoding=None,
+ use_native_hstore=True, use_native_uuid=True,
+ **kwargs):
+ PGDialect.__init__(self, **kwargs)
+ self.server_side_cursors = server_side_cursors
+ self.use_native_unicode = use_native_unicode
+ self.use_native_hstore = use_native_hstore
+ self.use_native_uuid = use_native_uuid
+ self.supports_unicode_binds = use_native_unicode
+ self.client_encoding = client_encoding
+ if self.dbapi and hasattr(self.dbapi, '__version__'):
+ m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
+ self.dbapi.__version__)
+ if m:
+ self.psycopg2_version = tuple(
+ int(x)
+ for x in m.group(1, 2, 3)
+ if x is not None)
+
+ def initialize(self, connection):
+ super(PGDialect_psycopg2, self).initialize(connection)
+ self._has_native_hstore = self.use_native_hstore and \
+ self._hstore_oids(connection.connection) \
+ is not None
+ self._has_native_json = \
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json']
+ self._has_native_jsonb = \
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb']
+
+ # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
+ self.supports_sane_multi_rowcount = \
+ self.psycopg2_version >= \
+ self.FEATURE_VERSION_MAP['sane_multi_rowcount']
+
+ @classmethod
+ def dbapi(cls):
+ import psycopg2
+ return psycopg2
+
+ @classmethod
+ def _psycopg2_extensions(cls):
+ from psycopg2 import extensions
+ return extensions
+
+ @classmethod
+ def _psycopg2_extras(cls):
+ from psycopg2 import extras
+ return extras
+
+ @util.memoized_property
+ def _isolation_lookup(self):
+ extensions = self._psycopg2_extensions()
+ return {
+ 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
+ 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
+ 'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
+ 'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
+ 'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
+ }
+
+ def set_isolation_level(self, connection, level):
+ try:
+ level = self._isolation_lookup[level.replace('_', ' ')]
+ except KeyError:
+ raise exc.ArgumentError(
+ "Invalid value '%s' for isolation_level. "
+ "Valid isolation levels for %s are %s" %
+ (level, self.name, ", ".join(self._isolation_lookup))
+ )
+
+ connection.set_isolation_level(level)
+
+ def on_connect(self):
+ extras = self._psycopg2_extras()
+ extensions = self._psycopg2_extensions()
+
+ fns = []
+ if self.client_encoding is not None:
+ def on_connect(conn):
+ conn.set_client_encoding(self.client_encoding)
+ fns.append(on_connect)
+
+ if self.isolation_level is not None:
+ def on_connect(conn):
+ self.set_isolation_level(conn, self.isolation_level)
+ fns.append(on_connect)
+
+ if self.dbapi and self.use_native_uuid:
+ def on_connect(conn):
+ extras.register_uuid(None, conn)
+ fns.append(on_connect)
+
+ if self.dbapi and self.use_native_unicode:
+ def on_connect(conn):
+ extensions.register_type(extensions.UNICODE, conn)
+ extensions.register_type(extensions.UNICODEARRAY, conn)
+ fns.append(on_connect)
+
+ if self.dbapi and self.use_native_hstore:
+ def on_connect(conn):
+ hstore_oids = self._hstore_oids(conn)
+ if hstore_oids is not None:
+ oid, array_oid = hstore_oids
+ kw = {'oid': oid}
+ if util.py2k:
+ kw['unicode'] = True
+ if self.psycopg2_version >= \
+ self.FEATURE_VERSION_MAP['array_oid']:
+ kw['array_oid'] = array_oid
+ extras.register_hstore(conn, **kw)
+ fns.append(on_connect)
+
+ if self.dbapi and self._json_deserializer:
+ def on_connect(conn):
+ if self._has_native_json:
+ extras.register_default_json(
+ conn, loads=self._json_deserializer)
+ if self._has_native_jsonb:
+ extras.register_default_jsonb(
+ conn, loads=self._json_deserializer)
+ fns.append(on_connect)
+
+ if fns:
+ def on_connect(conn):
+ for fn in fns:
+ fn(conn)
+ return on_connect
+ else:
+ return None
+
+ @util.memoized_instancemethod
+ def _hstore_oids(self, conn):
+ if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']:
+ extras = self._psycopg2_extras()
+ oids = extras.HstoreAdapter.get_oids(conn)
+ if oids is not None and oids[0]:
+ return oids[0:2]
+ return None
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ if 'port' in opts:
+ opts['port'] = int(opts['port'])
+ opts.update(url.query)
+ return ([], opts)
+
+ def is_disconnect(self, e, connection, cursor):
+ if isinstance(e, self.dbapi.Error):
+ # check the "closed" flag. this might not be
+ # present on old psycopg2 versions. Also,
+ # this flag doesn't actually help in a lot of disconnect
+ # situations, so don't rely on it.
+ if getattr(connection, 'closed', False):
+ return True
+
+ # checks based on strings. in the case that .closed
+ # didn't cut it, fall back onto these.
+ str_e = str(e).partition("\n")[0]
+ for msg in [
+ # these error messages from libpq: interfaces/libpq/fe-misc.c
+ # and interfaces/libpq/fe-secure.c.
+ 'terminating connection',
+ 'closed the connection',
+ 'connection not open',
+ 'could not receive data from server',
+ 'could not send data to server',
+                # psycopg2 client errors, psycopg2/connection.h,
+ # psycopg2/cursor.h
+ 'connection already closed',
+ 'cursor already closed',
+ # not sure where this path is originally from, it may
+ # be obsolete. It really says "losed", not "closed".
+ 'losed the connection unexpectedly',
+ # these can occur in newer SSL
+ 'connection has been closed unexpectedly',
+ 'SSL SYSCALL error: Bad file descriptor',
+ 'SSL SYSCALL error: EOF detected',
+ 'SSL error: decryption failed or bad record mac',
+ ]:
+ idx = str_e.find(msg)
+ if idx >= 0 and '"' not in str_e[:idx]:
+ return True
+ return False
+
+dialect = PGDialect_psycopg2
diff --git a/app/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/app/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
new file mode 100644
index 0000000..e99389d
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
@@ -0,0 +1,61 @@
+# postgresql/psycopg2cffi.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+.. dialect:: postgresql+psycopg2cffi
+ :name: psycopg2cffi
+ :dbapi: psycopg2cffi
+ :connectstring: \
+postgresql+psycopg2cffi://user:password@host:port/dbname\
+[?key=value&key=value...]
+ :url: http://pypi.python.org/pypi/psycopg2cffi/
+
+``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C
+layer. This makes it suitable for use in e.g. PyPy. Documentation
+is as per ``psycopg2``.
+
+.. versionadded:: 1.0.0
+
+.. seealso::
+
+ :mod:`sqlalchemy.dialects.postgresql.psycopg2`
+
+"""
+from .psycopg2 import PGDialect_psycopg2
+
+
+class PGDialect_psycopg2cffi(PGDialect_psycopg2):
+ driver = 'psycopg2cffi'
+ supports_unicode_statements = True
+
+ # psycopg2cffi's first release is 2.5.0, but reports
+ # __version__ as 2.4.4. Subsequent releases seem to have
+ # fixed this.
+
+ FEATURE_VERSION_MAP = dict(
+ native_json=(2, 4, 4),
+ native_jsonb=(2, 7, 1),
+ sane_multi_rowcount=(2, 4, 4),
+ array_oid=(2, 4, 4),
+ hstore_adapter=(2, 4, 4)
+ )
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('psycopg2cffi')
+
+ @classmethod
+ def _psycopg2_extensions(cls):
+ root = __import__('psycopg2cffi', fromlist=['extensions'])
+ return root.extensions
+
+ @classmethod
+ def _psycopg2_extras(cls):
+ root = __import__('psycopg2cffi', fromlist=['extras'])
+ return root.extras
+
+
+dialect = PGDialect_psycopg2cffi
diff --git a/app/lib/sqlalchemy/dialects/postgresql/pygresql.py b/app/lib/sqlalchemy/dialects/postgresql/pygresql.py
new file mode 100644
index 0000000..aa877a2
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/pygresql.py
@@ -0,0 +1,243 @@
+# postgresql/pygresql.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: postgresql+pygresql
+ :name: pygresql
+ :dbapi: pgdb
+ :connectstring: postgresql+pygresql://user:password@host:port/dbname\
+[?key=value&key=value...]
+ :url: http://www.pygresql.org/
+"""
+
+import decimal
+import re
+
+from ... import exc, processors, util
+from ...types import Numeric, JSON as Json
+from ...sql.elements import Null
+from .base import PGDialect, PGCompiler, PGIdentifierPreparer, \
+ _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES, UUID
+from .hstore import HSTORE
+from .json import JSON, JSONB
+
+
+class _PGNumeric(Numeric):
+
+ def bind_processor(self, dialect):
+ return None
+
+ def result_processor(self, dialect, coltype):
+ if not isinstance(coltype, int):
+ coltype = coltype.oid
+ if self.asdecimal:
+ if coltype in _FLOAT_TYPES:
+ return processors.to_decimal_processor_factory(
+ decimal.Decimal,
+ self._effective_decimal_return_scale)
+ elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+ # PyGreSQL returns Decimal natively for 1700 (numeric)
+ return None
+ else:
+ raise exc.InvalidRequestError(
+ "Unknown PG numeric type: %d" % coltype)
+ else:
+ if coltype in _FLOAT_TYPES:
+ # PyGreSQL returns float natively for 701 (float8)
+ return None
+ elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
+ return processors.to_float
+ else:
+ raise exc.InvalidRequestError(
+ "Unknown PG numeric type: %d" % coltype)
+
+
+class _PGHStore(HSTORE):
+
+ def bind_processor(self, dialect):
+ if not dialect.has_native_hstore:
+ return super(_PGHStore, self).bind_processor(dialect)
+        hstore = dialect.dbapi.Hstore
+
+        def process(value):
+ if isinstance(value, dict):
+ return hstore(value)
+ return value
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if not dialect.has_native_hstore:
+ return super(_PGHStore, self).result_processor(dialect, coltype)
+
+
+class _PGJSON(JSON):
+
+ def bind_processor(self, dialect):
+ if not dialect.has_native_json:
+ return super(_PGJSON, self).bind_processor(dialect)
+ json = dialect.dbapi.Json
+
+ def process(value):
+ if value is self.NULL:
+ value = None
+ elif isinstance(value, Null) or (
+ value is None and self.none_as_null):
+ return None
+ if value is None or isinstance(value, (dict, list)):
+ return json(value)
+ return value
+
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if not dialect.has_native_json:
+ return super(_PGJSON, self).result_processor(dialect, coltype)
+
+
+class _PGJSONB(JSONB):
+
+ def bind_processor(self, dialect):
+ if not dialect.has_native_json:
+ return super(_PGJSONB, self).bind_processor(dialect)
+ json = dialect.dbapi.Json
+
+ def process(value):
+ if value is self.NULL:
+ value = None
+ elif isinstance(value, Null) or (
+ value is None and self.none_as_null):
+ return None
+ if value is None or isinstance(value, (dict, list)):
+ return json(value)
+ return value
+
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if not dialect.has_native_json:
+ return super(_PGJSONB, self).result_processor(dialect, coltype)
+
+
+class _PGUUID(UUID):
+
+ def bind_processor(self, dialect):
+ if not dialect.has_native_uuid:
+ return super(_PGUUID, self).bind_processor(dialect)
+ uuid = dialect.dbapi.Uuid
+
+ def process(value):
+ if value is None:
+ return None
+ if isinstance(value, (str, bytes)):
+ if len(value) == 16:
+ return uuid(bytes=value)
+ return uuid(value)
+ if isinstance(value, int):
+ return uuid(int=value)
+ return value
+
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if not dialect.has_native_uuid:
+ return super(_PGUUID, self).result_processor(dialect, coltype)
+ if not self.as_uuid:
+ def process(value):
+ if value is not None:
+ return str(value)
+ return process
+
+
+class _PGCompiler(PGCompiler):
+
+ def visit_mod_binary(self, binary, operator, **kw):
+ return self.process(binary.left, **kw) + " %% " + \
+ self.process(binary.right, **kw)
+
+ def post_process_text(self, text):
+ return text.replace('%', '%%')
+
+
+class _PGIdentifierPreparer(PGIdentifierPreparer):
+
+ def _escape_identifier(self, value):
+ value = value.replace(self.escape_quote, self.escape_to_quote)
+ return value.replace('%', '%%')
+
+
+class PGDialect_pygresql(PGDialect):
+
+ driver = 'pygresql'
+
+ statement_compiler = _PGCompiler
+ preparer = _PGIdentifierPreparer
+
+ @classmethod
+ def dbapi(cls):
+ import pgdb
+ return pgdb
+
+ colspecs = util.update_copy(
+ PGDialect.colspecs,
+ {
+ Numeric: _PGNumeric,
+ HSTORE: _PGHStore,
+ Json: _PGJSON,
+ JSON: _PGJSON,
+ JSONB: _PGJSONB,
+ UUID: _PGUUID,
+ }
+ )
+
+ def __init__(self, **kwargs):
+ super(PGDialect_pygresql, self).__init__(**kwargs)
+ try:
+ version = self.dbapi.version
+ m = re.match(r'(\d+)\.(\d+)', version)
+ version = (int(m.group(1)), int(m.group(2)))
+ except (AttributeError, ValueError, TypeError):
+ version = (0, 0)
+ self.dbapi_version = version
+ if version < (5, 0):
+ has_native_hstore = has_native_json = has_native_uuid = False
+ if version != (0, 0):
+ util.warn("PyGreSQL is only fully supported by SQLAlchemy"
+ " since version 5.0.")
+ else:
+ self.supports_unicode_statements = True
+ self.supports_unicode_binds = True
+ has_native_hstore = has_native_json = has_native_uuid = True
+ self.has_native_hstore = has_native_hstore
+ self.has_native_json = has_native_json
+ self.has_native_uuid = has_native_uuid
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ if 'port' in opts:
+ opts['host'] = '%s:%s' % (
+ opts.get('host', '').rsplit(':', 1)[0], opts.pop('port'))
+ opts.update(url.query)
+ return [], opts
+
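+    # Note (illustrative): PyGreSQL's ``pgdb.connect()`` takes the port as
+    # part of the host string, so a URL of
+    # ``postgresql+pygresql://scott:tiger@localhost:5432/test`` produces
+    # ``opts['host'] == 'localhost:5432'`` above.
+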
+ def is_disconnect(self, e, connection, cursor):
+ if isinstance(e, self.dbapi.Error):
+ if not connection:
+ return False
+ try:
+ connection = connection.connection
+ except AttributeError:
+ pass
+ else:
+ if not connection:
+ return False
+ try:
+ return connection.closed
+ except AttributeError: # PyGreSQL < 5.0
+ return connection._cnx is None
+ return False
+
+
+dialect = PGDialect_pygresql
diff --git a/app/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/app/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
new file mode 100644
index 0000000..ab77493
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
@@ -0,0 +1,97 @@
+# postgresql/pypostgresql.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: postgresql+pypostgresql
+ :name: py-postgresql
+ :dbapi: pypostgresql
+ :connectstring: postgresql+pypostgresql://user:password@host:port/dbname\
+[?key=value&key=value...]
+ :url: http://python.projects.pgfoundry.org/
+
+
+"""
+from ... import util
+from ... import types as sqltypes
+from .base import PGDialect, PGExecutionContext
+from ... import processors
+
+
+class PGNumeric(sqltypes.Numeric):
+ def bind_processor(self, dialect):
+ return processors.to_str
+
+ def result_processor(self, dialect, coltype):
+ if self.asdecimal:
+ return None
+ else:
+ return processors.to_float
+
+
+class PGExecutionContext_pypostgresql(PGExecutionContext):
+ pass
+
+
+class PGDialect_pypostgresql(PGDialect):
+ driver = 'pypostgresql'
+
+ supports_unicode_statements = True
+ supports_unicode_binds = True
+ description_encoding = None
+ default_paramstyle = 'pyformat'
+
+ # requires trunk version to support sane rowcounts
+ # TODO: use dbapi version information to set this flag appropriately
+ supports_sane_rowcount = True
+ supports_sane_multi_rowcount = False
+
+ execution_ctx_cls = PGExecutionContext_pypostgresql
+ colspecs = util.update_copy(
+ PGDialect.colspecs,
+ {
+ sqltypes.Numeric: PGNumeric,
+
+ # prevents PGNumeric from being used
+ sqltypes.Float: sqltypes.Float,
+ }
+ )
+
+ @classmethod
+ def dbapi(cls):
+ from postgresql.driver import dbapi20
+ return dbapi20
+
+ _DBAPI_ERROR_NAMES = [
+ "Error",
+ "InterfaceError", "DatabaseError", "DataError",
+ "OperationalError", "IntegrityError", "InternalError",
+ "ProgrammingError", "NotSupportedError"
+ ]
+
+ @util.memoized_property
+ def dbapi_exception_translation_map(self):
+ if self.dbapi is None:
+ return {}
+
+ return dict(
+ (getattr(self.dbapi, name).__name__, name)
+ for name in self._DBAPI_ERROR_NAMES
+ )
+
+ def create_connect_args(self, url):
+ opts = url.translate_connect_args(username='user')
+ if 'port' in opts:
+ opts['port'] = int(opts['port'])
+ else:
+ opts['port'] = 5432
+ opts.update(url.query)
+ return ([], opts)
+
+ def is_disconnect(self, e, connection, cursor):
+ return "connection is closed" in str(e)
+
+dialect = PGDialect_pypostgresql
diff --git a/app/lib/sqlalchemy/dialects/postgresql/ranges.py b/app/lib/sqlalchemy/dialects/postgresql/ranges.py
new file mode 100644
index 0000000..e7f7da4
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/ranges.py
@@ -0,0 +1,168 @@
+# Copyright (C) 2013-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from .base import ischema_names
+from ... import types as sqltypes
+
+__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
+           'TSRANGE', 'TSTZRANGE')
+
+
+class RangeOperators(object):
+ """
+ This mixin provides functionality for the Range Operators
+ listed in Table 9-44 of the `postgres documentation`__ for Range
+ Functions and Operators. It is used by all the range types
+ provided in the ``postgres`` dialect and can likely be used for
+ any range types you create yourself.
+
+ __ http://www.postgresql.org/docs/devel/static/functions-range.html
+
+ No extra support is provided for the Range Functions listed in
+ Table 9-45 of the postgres documentation. For these, the normal
+ :func:`~sqlalchemy.sql.expression.func` object should be used.
+
+ .. versionadded:: 0.8.2 Support for PostgreSQL RANGE operations.
+
+ """
+
+ class comparator_factory(sqltypes.Concatenable.Comparator):
+ """Define comparison operations for range types."""
+
+ def __ne__(self, other):
+ "Boolean expression. Returns true if two ranges are not equal"
+ return self.expr.op('<>')(other)
+
+ def contains(self, other, **kw):
+ """Boolean expression. Returns true if the right hand operand,
+ which can be an element or a range, is contained within the
+ column.
+ """
+ return self.expr.op('@>')(other)
+
+ def contained_by(self, other):
+ """Boolean expression. Returns true if the column is contained
+ within the right hand operand.
+ """
+ return self.expr.op('<@')(other)
+
+ def overlaps(self, other):
+ """Boolean expression. Returns true if the column overlaps
+ (has points in common with) the right hand operand.
+ """
+ return self.expr.op('&&')(other)
+
+ def strictly_left_of(self, other):
+ """Boolean expression. Returns true if the column is strictly
+ left of the right hand operand.
+ """
+ return self.expr.op('<<')(other)
+
+ __lshift__ = strictly_left_of
+
+ def strictly_right_of(self, other):
+ """Boolean expression. Returns true if the column is strictly
+ right of the right hand operand.
+ """
+ return self.expr.op('>>')(other)
+
+ __rshift__ = strictly_right_of
+
+ def not_extend_right_of(self, other):
+ """Boolean expression. Returns true if the range in the column
+ does not extend right of the range in the operand.
+ """
+ return self.expr.op('&<')(other)
+
+ def not_extend_left_of(self, other):
+ """Boolean expression. Returns true if the range in the column
+ does not extend left of the range in the operand.
+ """
+ return self.expr.op('&>')(other)
+
+ def adjacent_to(self, other):
+ """Boolean expression. Returns true if the range in the column
+ is adjacent to the range in the operand.
+ """
+ return self.expr.op('-|-')(other)
+
+ def __add__(self, other):
+ """Range expression. Returns the union of the two ranges.
+ Will raise an exception if the resulting range is not
+        contiguous.
+ """
+ return self.expr.op('+')(other)
+
+
+class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
+ """Represent the PostgreSQL INT4RANGE type.
+
+ .. versionadded:: 0.8.2
+
+ """
+
+ __visit_name__ = 'INT4RANGE'
+
+ischema_names['int4range'] = INT4RANGE
+
+
+class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
+ """Represent the PostgreSQL INT8RANGE type.
+
+ .. versionadded:: 0.8.2
+
+ """
+
+ __visit_name__ = 'INT8RANGE'
+
+ischema_names['int8range'] = INT8RANGE
+
+
+class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
+ """Represent the PostgreSQL NUMRANGE type.
+
+ .. versionadded:: 0.8.2
+
+ """
+
+ __visit_name__ = 'NUMRANGE'
+
+ischema_names['numrange'] = NUMRANGE
+
+
+class DATERANGE(RangeOperators, sqltypes.TypeEngine):
+ """Represent the PostgreSQL DATERANGE type.
+
+ .. versionadded:: 0.8.2
+
+ """
+
+ __visit_name__ = 'DATERANGE'
+
+ischema_names['daterange'] = DATERANGE
+
+
+class TSRANGE(RangeOperators, sqltypes.TypeEngine):
+ """Represent the PostgreSQL TSRANGE type.
+
+ .. versionadded:: 0.8.2
+
+ """
+
+ __visit_name__ = 'TSRANGE'
+
+ischema_names['tsrange'] = TSRANGE
+
+
+class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
+ """Represent the PostgreSQL TSTZRANGE type.
+
+ .. versionadded:: 0.8.2
+
+ """
+
+ __visit_name__ = 'TSTZRANGE'
+
+ischema_names['tstzrange'] = TSTZRANGE
diff --git a/app/lib/sqlalchemy/dialects/postgresql/zxjdbc.py b/app/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
new file mode 100644
index 0000000..f3cfbb8
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
@@ -0,0 +1,46 @@
+# postgresql/zxjdbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: postgresql+zxjdbc
+ :name: zxJDBC for Jython
+ :dbapi: zxjdbc
+ :connectstring: postgresql+zxjdbc://scott:tiger@localhost/db
+ :driverurl: http://jdbc.postgresql.org/
+
+
+"""
+from ...connectors.zxJDBC import ZxJDBCConnector
+from .base import PGDialect, PGExecutionContext
+
+
+class PGExecutionContext_zxjdbc(PGExecutionContext):
+
+ def create_cursor(self):
+ cursor = self._dbapi_connection.cursor()
+ cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
+ return cursor
+
+
+class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
+ jdbc_db_name = 'postgresql'
+ jdbc_driver_name = 'org.postgresql.Driver'
+
+ execution_ctx_cls = PGExecutionContext_zxjdbc
+
+ supports_native_decimal = True
+
+ def __init__(self, *args, **kwargs):
+ super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
+ from com.ziclix.python.sql.handler import PostgresqlDataHandler
+ self.DataHandler = PostgresqlDataHandler
+
+ def _get_server_version_info(self, connection):
+ parts = connection.connection.dbversion.split('.')
+ return tuple(int(x) for x in parts)
+
+dialect = PGDialect_zxjdbc
diff --git a/app/lib/sqlalchemy/dialects/sqlite/__init__.py b/app/lib/sqlalchemy/dialects/sqlite/__init__.py
new file mode 100644
index 0000000..a0ec025
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sqlite/__init__.py
@@ -0,0 +1,20 @@
+# sqlite/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from sqlalchemy.dialects.sqlite import base, pysqlite, pysqlcipher
+
+# default dialect
+base.dialect = pysqlite.dialect
+
+from sqlalchemy.dialects.sqlite.base import (
+ BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, INTEGER, REAL,
+ NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect,
+)
+
+__all__ = ('BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL',
+ 'FLOAT', 'INTEGER', 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME',
+ 'TIMESTAMP', 'VARCHAR', 'REAL', 'dialect')
diff --git a/app/lib/sqlalchemy/dialects/sqlite/base.py b/app/lib/sqlalchemy/dialects/sqlite/base.py
new file mode 100644
index 0000000..76193ff
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sqlite/base.py
@@ -0,0 +1,1577 @@
+# sqlite/base.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+.. dialect:: sqlite
+ :name: SQLite
+
+.. _sqlite_datetime:
+
+Date and Time Types
+-------------------
+
+SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
+not provide out of the box functionality for translating values between Python
+`datetime` objects and a SQLite-supported format. SQLAlchemy's own
+:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
+and parsing functionality when SQLite is used. The implementation classes are
+:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
+These types represent dates and times as ISO formatted strings, which also
+nicely support ordering. There's no reliance on typical "libc" internals for
+these functions so historical dates are fully supported.
+
+Ensuring Text affinity
+^^^^^^^^^^^^^^^^^^^^^^
+
+The DDL rendered for these types is the standard ``DATE``, ``TIME``
+and ``DATETIME`` indicators. However, custom storage formats can also be
+applied to these types. When the
+storage format is detected as containing no alpha characters, the DDL for
+these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
+so that the column continues to have textual affinity.
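+
+For example, a sketch of a custom, digits-only storage format that would
+trigger the ``_CHAR`` rendering::
+
+    from sqlalchemy.dialects.sqlite import DATE
+
+    # the rendered format contains no alpha characters, so the DDL
+    # for a column of this type is emitted as DATE_CHAR
+    d = DATE(storage_format="%(year)04d%(month)02d%(day)02d")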
+
+.. seealso::
+
+    `Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ -
+    in the SQLite documentation
+
+.. _sqlite_autoincrement:
+
+SQLite Auto Incrementing Behavior
+----------------------------------
+
+Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
+
+Key concepts:
+
+* SQLite has an implicit "auto increment" feature that takes place for any
+ non-composite primary-key column that is specifically created using
+ "INTEGER PRIMARY KEY" for the type + primary key.
+
+* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
+ equivalent to the implicit autoincrement feature; this keyword is not
+ recommended for general use. SQLAlchemy does not render this keyword
+ unless a special SQLite-specific directive is used (see below). However,
+ it still requires that the column's type is named "INTEGER".
+
+Using the AUTOINCREMENT Keyword
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To specifically render the AUTOINCREMENT keyword on the primary key column
+when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
+construct::
+
+ Table('sometable', metadata,
+ Column('id', Integer, primary_key=True),
+ sqlite_autoincrement=True)
+
+Allowing autoincrement behavior in SQLAlchemy types other than Integer/INTEGER
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+SQLite's typing model is based on naming conventions. Among
+other things, this means that any type name which contains the
+substring ``"INT"`` will be determined to be of "integer affinity". A
+type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"`` will be
+considered by SQLite to be of "integer" affinity. However, **the SQLite
+autoincrement feature, whether implicitly or explicitly enabled,
+requires that the name of the column's type
+is exactly the string "INTEGER"**. Therefore, if an
+application uses a type like :class:`.BigInteger` for a primary key, on
+SQLite this type will need to be rendered as the name ``"INTEGER"`` when
+emitting the initial ``CREATE TABLE`` statement in order for the autoincrement
+behavior to be available.
+
+One approach to achieve this is to use :class:`.Integer` on SQLite
+only using :meth:`.TypeEngine.with_variant`::
+
+ table = Table(
+ "my_table", metadata,
+ Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
+ )
+
+Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name
+to be ``INTEGER`` when compiled against SQLite::
+
+ from sqlalchemy import BigInteger
+ from sqlalchemy.ext.compiler import compiles
+
+ class SLBigInteger(BigInteger):
+ pass
+
+ @compiles(SLBigInteger, 'sqlite')
+ def bi_c(element, compiler, **kw):
+ return "INTEGER"
+
+ @compiles(SLBigInteger)
+ def bi_c(element, compiler, **kw):
+ return compiler.visit_BIGINT(element, **kw)
+
+
+ table = Table(
+ "my_table", metadata,
+ Column("id", SLBigInteger(), primary_key=True)
+ )
+
+.. seealso::
+
+ :meth:`.TypeEngine.with_variant`
+
+ :ref:`sqlalchemy.ext.compiler_toplevel`
+
+    `Datatypes In SQLite Version 3 <http://www.sqlite.org/datatype3.html>`_
+
+.. _sqlite_concurrency:
+
+Database Locking Behavior / Concurrency
+---------------------------------------
+
+SQLite is not designed for a high level of write concurrency. The database
+itself, being a file, is locked completely during write operations within
+transactions, meaning exactly one "connection" (in reality a file handle)
+has exclusive access to the database during this period - all other
+"connections" will be blocked during this time.
+
+The Python DBAPI specification also calls for a connection model that is
+always in a transaction; there is no ``connection.begin()`` method,
+only ``connection.commit()`` and ``connection.rollback()``, upon which a
+new transaction is to be begun immediately. This may seem to imply
+that the SQLite driver would in theory allow only a single filehandle on a
+particular database file at any time; however, there are several
+factors both within SQLite itself as well as within the pysqlite driver
+which loosen this restriction significantly.
+
+However, no matter what locking modes are used, SQLite will still always
+lock the database file once a transaction is started and DML (e.g. INSERT,
+UPDATE, DELETE) has at least been emitted, and this will block
+other transactions at least at the point that they also attempt to emit DML.
+By default, the length of time on this block is very short before it times out
+with an error.
+
+This behavior becomes more critical when used in conjunction with the
+SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
+within a transaction, and with its autoflush model, may emit DML preceding
+any SELECT statement. This may lead to a SQLite database that locks
+more quickly than is expected. The locking mode of SQLite and the pysqlite
+driver can be manipulated to some degree, however it should be noted that
+achieving a high degree of write-concurrency with SQLite is a losing battle.
+
+For more information on SQLite's lack of write concurrency by design, please
+see
+`Situations Where Another RDBMS May Work Better - High Concurrency
+<http://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
+
+The following subsections introduce areas that are impacted by SQLite's
+file-based architecture and additionally will usually require workarounds to
+work when using the pysqlite driver.
+
+.. _sqlite_isolation_level:
+
+Transaction Isolation Level
+----------------------------
+
+SQLite supports "transaction isolation" in a non-standard way, along two
+axes. One is that of the `PRAGMA read_uncommitted
+<http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
+instruction. This setting can essentially switch SQLite between its
+default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
+mode normally referred to as ``READ UNCOMMITTED``.
+
+SQLAlchemy ties into this PRAGMA statement using the
+:paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`.
+Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
+and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
+SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
+the pysqlite driver's default behavior.
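+
+For example, selecting "dirty read" behavior on an engine uses the standard
+:paramref:`.create_engine.isolation_level` parameter (a sketch; the
+database path is arbitrary)::
+
+    from sqlalchemy import create_engine
+
+    engine = create_engine(
+        "sqlite:///some.db", isolation_level="READ UNCOMMITTED")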
+
+The other axis along which SQLite's transactional locking is impacted is
+via the nature of the ``BEGIN`` statement used. The three varieties
+are "deferred", "immediate", and "exclusive", as described at
+`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight
+``BEGIN`` statement uses the "deferred" mode, where the database file is
+not locked until the first read or write operation, and read access remains
+open to other transactions until the first write operation. But again,
+it is critical to note that the pysqlite driver interferes with this behavior
+by *not even emitting BEGIN* until the first write operation.
+
+.. warning::
+
+ SQLite's transactional scope is impacted by unresolved
+ issues in the pysqlite driver, which defers BEGIN statements to a greater
+ degree than is often feasible. See the section :ref:`pysqlite_serializable`
+ for techniques to work around this behavior.
+
+SAVEPOINT Support
+----------------------------
+
+SQLite supports SAVEPOINTs, which only function once a transaction is
+begun. SQLAlchemy's SAVEPOINT support is available using the
+:meth:`.Connection.begin_nested` method at the Core level, and
+:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
+won't work at all with pysqlite unless workarounds are taken.
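+
+A minimal Core-level sketch, assuming the pysqlite workarounds described
+in :ref:`pysqlite_serializable` are in place (``engine`` and ``my_table``
+here are hypothetical)::
+
+    with engine.connect() as conn:
+        trans = conn.begin()
+        sp = conn.begin_nested()           # emits SAVEPOINT
+        conn.execute(my_table.insert(), {"id": 1})
+        sp.rollback()                      # emits ROLLBACK TO SAVEPOINT
+        trans.commit()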
+
+.. warning::
+
+ SQLite's SAVEPOINT feature is impacted by unresolved
+ issues in the pysqlite driver, which defers BEGIN statements to a greater
+ degree than is often feasible. See the section :ref:`pysqlite_serializable`
+ for techniques to work around this behavior.
+
+Transactional DDL
+----------------------------
+
+The SQLite database supports transactional :term:`DDL` as well.
+In this case, the pysqlite driver is not only failing to start transactions,
+it also is ending any existing transaction when DDL is detected, so again,
+workarounds are required.
+
+.. warning::
+
+ SQLite's transactional DDL is impacted by unresolved issues
+ in the pysqlite driver, which fails to emit BEGIN and additionally
+ forces a COMMIT to cancel any transaction when DDL is encountered.
+ See the section :ref:`pysqlite_serializable`
+ for techniques to work around this behavior.
+
+.. _sqlite_foreign_keys:
+
+Foreign Key Support
+-------------------
+
+SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
+however by default these constraints have no effect on the operation of the
+table.
+
+Constraint checking on SQLite has three prerequisites:
+
+* At least version 3.6.19 of SQLite must be in use
+* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
+ or SQLITE_OMIT_TRIGGER symbols enabled.
+* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
+ connections before use.
+
+SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
+new connections through the usage of events::
+
+ from sqlalchemy.engine import Engine
+ from sqlalchemy import event
+
+ @event.listens_for(Engine, "connect")
+ def set_sqlite_pragma(dbapi_connection, connection_record):
+ cursor = dbapi_connection.cursor()
+ cursor.execute("PRAGMA foreign_keys=ON")
+ cursor.close()
+
+.. warning::
+
+ When SQLite foreign keys are enabled, it is **not possible**
+ to emit CREATE or DROP statements for tables that contain
+ mutually-dependent foreign key constraints;
+ to emit the DDL for these tables requires that ALTER TABLE be used to
+ create or drop these constraints separately, for which SQLite has
+ no support.
+
+.. seealso::
+
+    `SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
+    - on the SQLite web site.
+
+ :ref:`event_toplevel` - SQLAlchemy event API.
+
+ :ref:`use_alter` - more information on SQLAlchemy's facilities for handling
+ mutually-dependent foreign key constraints.
+
+.. _sqlite_type_reflection:
+
+Type Reflection
+---------------
+
+SQLite types are unlike those of most other database backends, in that
+the string name of the type usually does not correspond to a "type" in a
+one-to-one fashion. Instead, SQLite links per-column typing behavior
+to one of five so-called "type affinities" based on a string matching
+pattern for the type.
+
+SQLAlchemy's reflection process, when inspecting types, uses a simple
+lookup table to link the keywords returned to provided SQLAlchemy types.
+This lookup table is present within the SQLite dialect as it is for all
+other dialects. However, the SQLite dialect has a different "fallback"
+routine for when a particular type name is not located in the lookup map;
+it instead implements the SQLite "type affinity" scheme located at
+http://www.sqlite.org/datatype3.html section 2.1.
+
+The provided typemap will make direct associations from an exact string
+name match for the following types:
+
+:class:`~.types.BIGINT`, :class:`~.types.BLOB`,
+:class:`~.types.BOOLEAN`, :class:`~.types.CHAR`,
+:class:`~.types.DATE`, :class:`~.types.DATETIME`,
+:class:`~.types.DECIMAL`, :class:`~.types.FLOAT`,
+:class:`~.types.INTEGER`, :class:`~.types.NUMERIC`,
+:class:`~.types.REAL`, :class:`~.types.SMALLINT`,
+:class:`~.types.TEXT`, :class:`~.types.TIME`,
+:class:`~.types.TIMESTAMP`, :class:`~.types.VARCHAR`,
+:class:`~.types.NVARCHAR`, :class:`~.types.NCHAR`
+
+When a type name does not match one of the above types, the "type affinity"
+lookup is used instead:
+
+* :class:`~.types.INTEGER` is returned if the type name includes the
+ string ``INT``
+* :class:`~.types.TEXT` is returned if the type name includes the
+ string ``CHAR``, ``CLOB`` or ``TEXT``
+* :class:`~.types.NullType` is returned if the type name includes the
+ string ``BLOB``
+* :class:`~.types.REAL` is returned if the type name includes the string
+ ``REAL``, ``FLOA`` or ``DOUB``.
+* Otherwise, the :class:`~.types.NUMERIC` type is used.
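+
+For example (a sketch; the table and type names are arbitrary)::
+
+    from sqlalchemy import create_engine, inspect
+
+    eng = create_engine("sqlite://")
+    eng.execute("create table t (x BIGINTEGER, y XML)")
+
+    # 'BIGINTEGER' is not in the lookup table but contains "INT",
+    # so it reflects as INTEGER; 'XML' matches no affinity rule
+    # and falls through to NUMERIC.
+    print([c['type'] for c in inspect(eng).get_columns('t')])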
+
+.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
+ columns.
+
+
+.. _sqlite_partial_index:
+
+Partial Indexes
+---------------
+
+A partial index, e.g. one which uses a WHERE clause, can be specified
+with the DDL system using the argument ``sqlite_where``::
+
+ tbl = Table('testtbl', m, Column('data', Integer))
+ idx = Index('test_idx1', tbl.c.data,
+ sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
+
+The index will be rendered at create time as::
+
+ CREATE INDEX test_idx1 ON testtbl (data)
+ WHERE data > 5 AND data < 10
+
+.. versionadded:: 0.9.9
+
+.. _sqlite_dotted_column_names:
+
+Dotted Column Names
+-------------------
+
+Using table or column names that explicitly have periods in them is
+**not recommended**. While this is a bad idea for relational databases
+in general, as the dot is a syntactically significant character, the
+SQLite driver prior to version **3.10.0** of SQLite additionally has a bug
+which requires that SQLAlchemy filter out these dots in result sets.
+
+.. versionchanged:: 1.1
+
+ The following SQLite issue has been resolved as of version 3.10.0
+ of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
+ workarounds based on detection of this version.
+
+The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
+
+ import sqlite3
+
+ assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
+
+ conn = sqlite3.connect(":memory:")
+ cursor = conn.cursor()
+
+ cursor.execute("create table x (a integer, b integer)")
+ cursor.execute("insert into x (a, b) values (1, 1)")
+ cursor.execute("insert into x (a, b) values (2, 2)")
+
+ cursor.execute("select x.a, x.b from x")
+ assert [c[0] for c in cursor.description] == ['a', 'b']
+
+ cursor.execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert [c[0] for c in cursor.description] == ['a', 'b'], \
+ [c[0] for c in cursor.description]
+
+The second assertion fails::
+
+ Traceback (most recent call last):
+ File "test.py", line 19, in
+ [c[0] for c in cursor.description]
+ AssertionError: ['x.a', 'x.b']
+
+Where above, the driver incorrectly reports the names of the columns
+including the name of the table, which is entirely inconsistent vs.
+when the UNION is not present.
+
+SQLAlchemy relies upon column names being predictable in how they match
+to the original statement, so the SQLAlchemy dialect has no choice but
+to filter these out::
+
+
+ from sqlalchemy import create_engine
+
+ eng = create_engine("sqlite://")
+ conn = eng.connect()
+
+ conn.execute("create table x (a integer, b integer)")
+ conn.execute("insert into x (a, b) values (1, 1)")
+ conn.execute("insert into x (a, b) values (2, 2)")
+
+ result = conn.execute("select x.a, x.b from x")
+ assert result.keys() == ["a", "b"]
+
+ result = conn.execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert result.keys() == ["a", "b"]
+
+Note that above, even though SQLAlchemy filters out the dots, *both
+names are still addressable*::
+
+ >>> row = result.first()
+ >>> row["a"]
+ 1
+ >>> row["x.a"]
+ 1
+ >>> row["b"]
+ 1
+ >>> row["x.b"]
+ 1
+
+Therefore, the workaround applied by SQLAlchemy only impacts
+:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API.
+In the very specific case where
+an application is forced to use column names that contain dots, and the
+functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()`
+is required to return these dotted names unmodified, the ``sqlite_raw_colnames``
+execution option may be provided, either on a per-:class:`.Connection` basis::
+
+ result = conn.execution_options(sqlite_raw_colnames=True).execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert result.keys() == ["x.a", "x.b"]
+
+or on a per-:class:`.Engine` basis::
+
+ engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
+
+When using the per-:class:`.Engine` execution option, note that
+**Core and ORM queries that use UNION may not function properly**.
+
+"""
+
+import datetime
+import re
+
+from ... import processors
+from ... import sql, exc
+from ... import types as sqltypes, schema as sa_schema
+from ... import util
+from ...engine import default, reflection
+from ...sql import compiler
+
+from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT,
+ INTEGER, REAL, NUMERIC, SMALLINT, TEXT,
+ TIMESTAMP, VARCHAR)
+
+
+class _DateTimeMixin(object):
+ _reg = None
+ _storage_format = None
+
+ def __init__(self, storage_format=None, regexp=None, **kw):
+ super(_DateTimeMixin, self).__init__(**kw)
+ if regexp is not None:
+ self._reg = re.compile(regexp)
+ if storage_format is not None:
+ self._storage_format = storage_format
+
+ @property
+ def format_is_text_affinity(self):
+ """return True if the storage format will automatically imply
+ a TEXT affinity.
+
+ If the storage format contains no non-numeric characters,
+ it will imply a NUMERIC storage format on SQLite; in this case,
+ the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
+ TIME_CHAR.
+
+ .. versionadded:: 1.0.0
+
+ """
+ spec = self._storage_format % {
+ "year": 0, "month": 0, "day": 0, "hour": 0,
+ "minute": 0, "second": 0, "microsecond": 0
+ }
+ return bool(re.search(r'[^0-9]', spec))
+
+ def adapt(self, cls, **kw):
+ if issubclass(cls, _DateTimeMixin):
+ if self._storage_format:
+ kw["storage_format"] = self._storage_format
+ if self._reg:
+ kw["regexp"] = self._reg
+ return super(_DateTimeMixin, self).adapt(cls, **kw)
+
+ def literal_processor(self, dialect):
+ bp = self.bind_processor(dialect)
+
+ def process(value):
+ return "'%s'" % bp(value)
+ return process
+
+
+class DATETIME(_DateTimeMixin, sqltypes.DateTime):
+ r"""Represent a Python datetime object in SQLite using a string.
+
+ The default string storage format is::
+
+ "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:\
+%(second)02d.%(microsecond)06d"
+
+ e.g.::
+
+ 2011-03-15 12:05:57.10558
+
+ The storage format can be customized to some degree using the
+ ``storage_format`` and ``regexp`` parameters, such as::
+
+ import re
+ from sqlalchemy.dialects.sqlite import DATETIME
+
+ dt = DATETIME(
+ storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:\
+%(min)02d:%(second)02d",
+ regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
+ )
+
+ :param storage_format: format string which will be applied to the dict
+ with keys year, month, day, hour, minute, second, and microsecond.
+
+ :param regexp: regular expression which will be applied to incoming result
+ rows. If the regexp contains named groups, the resulting match dict is
+ applied to the Python datetime() constructor as keyword arguments.
+ Otherwise, if positional groups are used, the datetime() constructor
+ is called with positional arguments via
+ ``*map(int, match_obj.groups(0))``.
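+
+    :param truncate_microseconds: when ``True``, the stored value is
+     truncated to whole seconds; this flag may not be combined with
+     ``storage_format`` or ``regexp``.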
+ """
+
+ _storage_format = (
+ "%(year)04d-%(month)02d-%(day)02d "
+ "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
+ )
+
+ def __init__(self, *args, **kwargs):
+ truncate_microseconds = kwargs.pop('truncate_microseconds', False)
+ super(DATETIME, self).__init__(*args, **kwargs)
+ if truncate_microseconds:
+ assert 'storage_format' not in kwargs, "You can specify only "\
+ "one of truncate_microseconds or storage_format."
+ assert 'regexp' not in kwargs, "You can specify only one of "\
+ "truncate_microseconds or regexp."
+ self._storage_format = (
+ "%(year)04d-%(month)02d-%(day)02d "
+ "%(hour)02d:%(minute)02d:%(second)02d"
+ )
+
+ def bind_processor(self, dialect):
+ datetime_datetime = datetime.datetime
+ datetime_date = datetime.date
+ format = self._storage_format
+
+ def process(value):
+ if value is None:
+ return None
+ elif isinstance(value, datetime_datetime):
+ return format % {
+ 'year': value.year,
+ 'month': value.month,
+ 'day': value.day,
+ 'hour': value.hour,
+ 'minute': value.minute,
+ 'second': value.second,
+ 'microsecond': value.microsecond,
+ }
+ elif isinstance(value, datetime_date):
+ return format % {
+ 'year': value.year,
+ 'month': value.month,
+ 'day': value.day,
+ 'hour': 0,
+ 'minute': 0,
+ 'second': 0,
+ 'microsecond': 0,
+ }
+ else:
+ raise TypeError("SQLite DateTime type only accepts Python "
+ "datetime and date objects as input.")
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if self._reg:
+ return processors.str_to_datetime_processor_factory(
+ self._reg, datetime.datetime)
+ else:
+ return processors.str_to_datetime
+
+
+class DATE(_DateTimeMixin, sqltypes.Date):
+ r"""Represent a Python date object in SQLite using a string.
+
+ The default string storage format is::
+
+ "%(year)04d-%(month)02d-%(day)02d"
+
+ e.g.::
+
+ 2011-03-15
+
+ The storage format can be customized to some degree using the
+ ``storage_format`` and ``regexp`` parameters, such as::
+
+ import re
+ from sqlalchemy.dialects.sqlite import DATE
+
+ d = DATE(
+ storage_format="%(month)02d/%(day)02d/%(year)04d",
+ regexp=re.compile("(?P\d+)/(?P\d+)/(?P\d+)")
+ )
+
+ :param storage_format: format string which will be applied to the
+ dict with keys year, month, and day.
+
+ :param regexp: regular expression which will be applied to
+ incoming result rows. If the regexp contains named groups, the
+ resulting match dict is applied to the Python date() constructor
+ as keyword arguments. Otherwise, if positional groups are used, the
+ date() constructor is called with positional arguments via
+ ``*map(int, match_obj.groups(0))``.
+ """
+
+ _storage_format = "%(year)04d-%(month)02d-%(day)02d"
+
+ def bind_processor(self, dialect):
+ datetime_date = datetime.date
+ format = self._storage_format
+
+ def process(value):
+ if value is None:
+ return None
+ elif isinstance(value, datetime_date):
+ return format % {
+ 'year': value.year,
+ 'month': value.month,
+ 'day': value.day,
+ }
+ else:
+ raise TypeError("SQLite Date type only accepts Python "
+ "date objects as input.")
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if self._reg:
+ return processors.str_to_datetime_processor_factory(
+ self._reg, datetime.date)
+ else:
+ return processors.str_to_date
+
+
+class TIME(_DateTimeMixin, sqltypes.Time):
+ r"""Represent a Python time object in SQLite using a string.
+
+ The default string storage format is::
+
+ "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
+
+ e.g.::
+
+ 12:05:57.10558
+
+ The storage format can be customized to some degree using the
+ ``storage_format`` and ``regexp`` parameters, such as::
+
+ import re
+ from sqlalchemy.dialects.sqlite import TIME
+
+ t = TIME(
+ storage_format="%(hour)02d-%(minute)02d-%(second)02d-\
+%(microsecond)06d",
+ regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
+ )
+
+ :param storage_format: format string which will be applied to the dict
+ with keys hour, minute, second, and microsecond.
+
+ :param regexp: regular expression which will be applied to incoming result
+ rows. If the regexp contains named groups, the resulting match dict is
+ applied to the Python time() constructor as keyword arguments. Otherwise,
+ if positional groups are used, the time() constructor is called with
+ positional arguments via ``*map(int, match_obj.groups(0))``.
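+
+    :param truncate_microseconds: when ``True``, the stored value is
+     truncated to whole seconds; this flag may not be combined with
+     ``storage_format`` or ``regexp``.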
+ """
+
+ _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
+
+ def __init__(self, *args, **kwargs):
+ truncate_microseconds = kwargs.pop('truncate_microseconds', False)
+ super(TIME, self).__init__(*args, **kwargs)
+ if truncate_microseconds:
+ assert 'storage_format' not in kwargs, "You can specify only "\
+ "one of truncate_microseconds or storage_format."
+ assert 'regexp' not in kwargs, "You can specify only one of "\
+ "truncate_microseconds or regexp."
+ self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
+
+ def bind_processor(self, dialect):
+ datetime_time = datetime.time
+ format = self._storage_format
+
+ def process(value):
+ if value is None:
+ return None
+ elif isinstance(value, datetime_time):
+ return format % {
+ 'hour': value.hour,
+ 'minute': value.minute,
+ 'second': value.second,
+ 'microsecond': value.microsecond,
+ }
+ else:
+ raise TypeError("SQLite Time type only accepts Python "
+ "time objects as input.")
+ return process
+
+ def result_processor(self, dialect, coltype):
+ if self._reg:
+ return processors.str_to_datetime_processor_factory(
+ self._reg, datetime.time)
+ else:
+ return processors.str_to_time
+
+colspecs = {
+ sqltypes.Date: DATE,
+ sqltypes.DateTime: DATETIME,
+ sqltypes.Time: TIME,
+}
+
+ischema_names = {
+ 'BIGINT': sqltypes.BIGINT,
+ 'BLOB': sqltypes.BLOB,
+ 'BOOL': sqltypes.BOOLEAN,
+ 'BOOLEAN': sqltypes.BOOLEAN,
+ 'CHAR': sqltypes.CHAR,
+ 'DATE': sqltypes.DATE,
+ 'DATE_CHAR': sqltypes.DATE,
+ 'DATETIME': sqltypes.DATETIME,
+ 'DATETIME_CHAR': sqltypes.DATETIME,
+ 'DOUBLE': sqltypes.FLOAT,
+ 'DECIMAL': sqltypes.DECIMAL,
+ 'FLOAT': sqltypes.FLOAT,
+ 'INT': sqltypes.INTEGER,
+ 'INTEGER': sqltypes.INTEGER,
+ 'NUMERIC': sqltypes.NUMERIC,
+ 'REAL': sqltypes.REAL,
+ 'SMALLINT': sqltypes.SMALLINT,
+ 'TEXT': sqltypes.TEXT,
+ 'TIME': sqltypes.TIME,
+ 'TIME_CHAR': sqltypes.TIME,
+ 'TIMESTAMP': sqltypes.TIMESTAMP,
+ 'VARCHAR': sqltypes.VARCHAR,
+ 'NVARCHAR': sqltypes.NVARCHAR,
+ 'NCHAR': sqltypes.NCHAR,
+}
+
+
+class SQLiteCompiler(compiler.SQLCompiler):
+ extract_map = util.update_copy(
+ compiler.SQLCompiler.extract_map,
+ {
+ 'month': '%m',
+ 'day': '%d',
+ 'year': '%Y',
+ 'second': '%S',
+ 'hour': '%H',
+ 'doy': '%j',
+ 'minute': '%M',
+ 'epoch': '%s',
+ 'dow': '%w',
+ 'week': '%W',
+ })
+
+ def visit_now_func(self, fn, **kw):
+ return "CURRENT_TIMESTAMP"
+
+ def visit_localtimestamp_func(self, func, **kw):
+ return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
+
+ def visit_true(self, expr, **kw):
+ return '1'
+
+ def visit_false(self, expr, **kw):
+ return '0'
+
+ def visit_char_length_func(self, fn, **kw):
+ return "length%s" % self.function_argspec(fn)
+
+ def visit_cast(self, cast, **kwargs):
+ if self.dialect.supports_cast:
+ return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
+ else:
+ return self.process(cast.clause, **kwargs)
+
+ def visit_extract(self, extract, **kw):
+ try:
+ return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
+ self.extract_map[extract.field],
+ self.process(extract.expr, **kw)
+ )
+ except KeyError:
+ raise exc.CompileError(
+ "%s is not a valid extract argument." % extract.field)
+
+ def limit_clause(self, select, **kw):
+ text = ""
+ if select._limit_clause is not None:
+ text += "\n LIMIT " + self.process(select._limit_clause, **kw)
+ if select._offset_clause is not None:
+ if select._limit_clause is None:
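+                # SQLite requires a LIMIT clause when OFFSET is
+                # present; LIMIT -1 is SQLite's spelling of "no limit"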
+ text += "\n LIMIT " + self.process(sql.literal(-1))
+ text += " OFFSET " + self.process(select._offset_clause, **kw)
+ else:
+ text += " OFFSET " + self.process(sql.literal(0), **kw)
+ return text
+
+ def for_update_clause(self, select, **kw):
+ # sqlite has no "FOR UPDATE" AFAICT
+ return ''
+
+ def visit_is_distinct_from_binary(self, binary, operator, **kw):
+ return "%s IS NOT %s" % (self.process(binary.left),
+ self.process(binary.right))
+
+ def visit_isnot_distinct_from_binary(self, binary, operator, **kw):
+ return "%s IS %s" % (self.process(binary.left),
+ self.process(binary.right))
+
+
+class SQLiteDDLCompiler(compiler.DDLCompiler):
+
+ def get_column_specification(self, column, **kwargs):
+ coltype = self.dialect.type_compiler.process(
+ column.type, type_expression=column)
+ colspec = self.preparer.format_column(column) + " " + coltype
+ default = self.get_column_default_string(column)
+ if default is not None:
+ colspec += " DEFAULT " + default
+
+ if not column.nullable:
+ colspec += " NOT NULL"
+
+ if column.primary_key:
+ if (
+ column.autoincrement is True and
+ len(column.table.primary_key.columns) != 1
+ ):
+ raise exc.CompileError(
+ "SQLite does not support autoincrement for "
+ "composite primary keys")
+
+ if (column.table.dialect_options['sqlite']['autoincrement'] and
+ len(column.table.primary_key.columns) == 1 and
+ issubclass(column.type._type_affinity, sqltypes.Integer) and
+ not column.foreign_keys):
+ colspec += " PRIMARY KEY AUTOINCREMENT"
+
+ return colspec
+
+ def visit_primary_key_constraint(self, constraint):
+ # for columns with sqlite_autoincrement=True,
+ # the PRIMARY KEY constraint can only be inline
+ # with the column itself.
+ if len(constraint.columns) == 1:
+ c = list(constraint)[0]
+ if (c.primary_key and
+ c.table.dialect_options['sqlite']['autoincrement'] and
+ issubclass(c.type._type_affinity, sqltypes.Integer) and
+ not c.foreign_keys):
+ return None
+
+ return super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
+ constraint)
+
+ def visit_foreign_key_constraint(self, constraint):
+
+ local_table = constraint.elements[0].parent.table
+ remote_table = constraint.elements[0].column.table
+
+ if local_table.schema != remote_table.schema:
+ return None
+ else:
+ return super(
+ SQLiteDDLCompiler,
+ self).visit_foreign_key_constraint(constraint)
+
+ def define_constraint_remote_table(self, constraint, table, preparer):
+ """Format the remote table clause of a CREATE CONSTRAINT clause."""
+
+ return preparer.format_table(table, use_schema=False)
+
+ def visit_create_index(self, create, include_schema=False,
+ include_table_schema=True):
+ index = create.element
+ self._verify_index_table(index)
+ preparer = self.preparer
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+ text += "INDEX %s ON %s (%s)" \
+ % (
+ self._prepared_index_name(index,
+ include_schema=True),
+ preparer.format_table(index.table,
+ use_schema=False),
+ ', '.join(
+ self.sql_compiler.process(
+ expr, include_table=False, literal_binds=True) for
+ expr in index.expressions)
+ )
+
+ whereclause = index.dialect_options["sqlite"]["where"]
+ if whereclause is not None:
+ where_compiled = self.sql_compiler.process(
+ whereclause, include_table=False,
+ literal_binds=True)
+ text += " WHERE " + where_compiled
+
+ return text
+
+
+class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
+ def visit_large_binary(self, type_, **kw):
+ return self.visit_BLOB(type_)
+
+ def visit_DATETIME(self, type_, **kw):
+ if not isinstance(type_, _DateTimeMixin) or \
+ type_.format_is_text_affinity:
+ return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
+ else:
+ return "DATETIME_CHAR"
+
+ def visit_DATE(self, type_, **kw):
+ if not isinstance(type_, _DateTimeMixin) or \
+ type_.format_is_text_affinity:
+ return super(SQLiteTypeCompiler, self).visit_DATE(type_)
+ else:
+ return "DATE_CHAR"
+
+ def visit_TIME(self, type_, **kw):
+ if not isinstance(type_, _DateTimeMixin) or \
+ type_.format_is_text_affinity:
+ return super(SQLiteTypeCompiler, self).visit_TIME(type_)
+ else:
+ return "TIME_CHAR"
+
+
+class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
+ reserved_words = set([
+ 'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
+ 'attach', 'autoincrement', 'before', 'begin', 'between', 'by',
+ 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit',
+ 'conflict', 'constraint', 'create', 'cross', 'current_date',
+ 'current_time', 'current_timestamp', 'database', 'default',
+ 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',
+ 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',
+ 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',
+ 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',
+ 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect',
+ 'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit',
+ 'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on',
+ 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query',
+ 'raise', 'references', 'reindex', 'rename', 'replace', 'restrict',
+ 'right', 'rollback', 'row', 'select', 'set', 'table', 'temp',
+ 'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union',
+ 'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual',
+ 'when', 'where',
+ ])
+
+ def format_index(self, index, use_schema=True, name=None):
+ """Prepare a quoted index and schema name."""
+
+ if name is None:
+ name = index.name
+ result = self.quote(name, index.quote)
+ if (not self.omit_schema and
+ use_schema and
+ getattr(index.table, "schema", None)):
+ result = self.quote_schema(
+ index.table.schema, index.table.quote_schema) + "." + result
+ return result
+
+
+class SQLiteExecutionContext(default.DefaultExecutionContext):
+ @util.memoized_property
+ def _preserve_raw_colnames(self):
+ return not self.dialect._broken_dotted_colnames or \
+ self.execution_options.get("sqlite_raw_colnames", False)
+
+ def _translate_colname(self, colname):
+ # TODO: detect SQLite version 3.10.0 or greater;
+ # see [ticket:3633]
+
+ # adjust for dotted column names. SQLite
+ # in the case of UNION may store col names as
+ # "tablename.colname", or if using an attached database,
+ # "database.tablename.colname", in cursor.description
+ if not self._preserve_raw_colnames and "." in colname:
+ return colname.split(".")[-1], colname
+ else:
+ return colname, None
+
+
+class SQLiteDialect(default.DefaultDialect):
+ name = 'sqlite'
+ supports_alter = False
+ supports_unicode_statements = True
+ supports_unicode_binds = True
+ supports_default_values = True
+ supports_empty_insert = False
+ supports_cast = True
+ supports_multivalues_insert = True
+
+ default_paramstyle = 'qmark'
+ execution_ctx_cls = SQLiteExecutionContext
+ statement_compiler = SQLiteCompiler
+ ddl_compiler = SQLiteDDLCompiler
+ type_compiler = SQLiteTypeCompiler
+ preparer = SQLiteIdentifierPreparer
+ ischema_names = ischema_names
+ colspecs = colspecs
+ isolation_level = None
+
+ construct_arguments = [
+ (sa_schema.Table, {
+ "autoincrement": False
+ }),
+ (sa_schema.Index, {
+ "where": None,
+ }),
+ ]
+
+ _broken_fk_pragma_quotes = False
+ _broken_dotted_colnames = False
+
+ def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
+ default.DefaultDialect.__init__(self, **kwargs)
+ self.isolation_level = isolation_level
+
+ # this flag used by pysqlite dialect, and perhaps others in the
+ # future, to indicate the driver is handling date/timestamp
+ # conversions (and perhaps datetime/time as well on some hypothetical
+ # driver ?)
+ self.native_datetime = native_datetime
+
+ if self.dbapi is not None:
+ self.supports_right_nested_joins = (
+ self.dbapi.sqlite_version_info >= (3, 7, 16))
+ self._broken_dotted_colnames = (
+ self.dbapi.sqlite_version_info < (3, 10, 0)
+ )
+ self.supports_default_values = (
+ self.dbapi.sqlite_version_info >= (3, 3, 8))
+ self.supports_cast = (
+ self.dbapi.sqlite_version_info >= (3, 2, 3))
+ self.supports_multivalues_insert = (
+ # http://www.sqlite.org/releaselog/3_7_11.html
+ self.dbapi.sqlite_version_info >= (3, 7, 11))
+ # see http://www.sqlalchemy.org/trac/ticket/2568
+ # as well as http://www.sqlite.org/src/info/600482d161
+ self._broken_fk_pragma_quotes = (
+ self.dbapi.sqlite_version_info < (3, 6, 14))
+
+ _isolation_lookup = {
+ 'READ UNCOMMITTED': 1,
+ 'SERIALIZABLE': 0,
+ }
+
+ def set_isolation_level(self, connection, level):
+ try:
+ isolation_level = self._isolation_lookup[level.replace('_', ' ')]
+ except KeyError:
+ raise exc.ArgumentError(
+ "Invalid value '%s' for isolation_level. "
+ "Valid isolation levels for %s are %s" %
+ (level, self.name, ", ".join(self._isolation_lookup))
+ )
+ cursor = connection.cursor()
+ cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
+ cursor.close()
+
+ def get_isolation_level(self, connection):
+ cursor = connection.cursor()
+ cursor.execute('PRAGMA read_uncommitted')
+ res = cursor.fetchone()
+ if res:
+ value = res[0]
+ else:
+ # http://www.sqlite.org/changes.html#version_3_3_3
+ # "Optional READ UNCOMMITTED isolation (instead of the
+ # default isolation level of SERIALIZABLE) and
+ # table level locking when database connections
+ # share a common cache.""
+ # pre-SQLite 3.3.0 default to 0
+ value = 0
+ cursor.close()
+ if value == 0:
+ return "SERIALIZABLE"
+ elif value == 1:
+ return "READ UNCOMMITTED"
+ else:
+ assert False, "Unknown isolation level %s" % value
+
+ def on_connect(self):
+ if self.isolation_level is not None:
+ def connect(conn):
+ self.set_isolation_level(conn, self.isolation_level)
+ return connect
+ else:
+ return None
+
+ @reflection.cache
+ def get_schema_names(self, connection, **kw):
+ s = "PRAGMA database_list"
+ dl = connection.execute(s)
+
+ return [db[1] for db in dl if db[1] != "temp"]
+
+ @reflection.cache
+ def get_table_names(self, connection, schema=None, **kw):
+ if schema is not None:
+ qschema = self.identifier_preparer.quote_identifier(schema)
+ master = '%s.sqlite_master' % qschema
+ else:
+ master = "sqlite_master"
+ s = ("SELECT name FROM %s "
+ "WHERE type='table' ORDER BY name") % (master,)
+ rs = connection.execute(s)
+ return [row[0] for row in rs]
+
+ @reflection.cache
+ def get_temp_table_names(self, connection, **kw):
+ s = "SELECT name FROM sqlite_temp_master "\
+ "WHERE type='table' ORDER BY name "
+ rs = connection.execute(s)
+
+ return [row[0] for row in rs]
+
+ @reflection.cache
+ def get_temp_view_names(self, connection, **kw):
+ s = "SELECT name FROM sqlite_temp_master "\
+ "WHERE type='view' ORDER BY name "
+ rs = connection.execute(s)
+
+ return [row[0] for row in rs]
+
+ def has_table(self, connection, table_name, schema=None):
+ info = self._get_table_pragma(
+ connection, "table_info", table_name, schema=schema)
+ return bool(info)
+
+ @reflection.cache
+ def get_view_names(self, connection, schema=None, **kw):
+ if schema is not None:
+ qschema = self.identifier_preparer.quote_identifier(schema)
+ master = '%s.sqlite_master' % qschema
+ else:
+ master = "sqlite_master"
+ s = ("SELECT name FROM %s "
+ "WHERE type='view' ORDER BY name") % (master,)
+ rs = connection.execute(s)
+
+ return [row[0] for row in rs]
+
+ @reflection.cache
+ def get_view_definition(self, connection, view_name, schema=None, **kw):
+ if schema is not None:
+ qschema = self.identifier_preparer.quote_identifier(schema)
+ master = '%s.sqlite_master' % qschema
+ s = ("SELECT sql FROM %s WHERE name = '%s'"
+ "AND type='view'") % (master, view_name)
+ rs = connection.execute(s)
+ else:
+ try:
+ s = ("SELECT sql FROM "
+ " (SELECT * FROM sqlite_master UNION ALL "
+ " SELECT * FROM sqlite_temp_master) "
+ "WHERE name = '%s' "
+ "AND type='view'") % view_name
+ rs = connection.execute(s)
+ except exc.DBAPIError:
+ s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
+ "AND type='view'") % view_name
+ rs = connection.execute(s)
+
+ result = rs.fetchall()
+ if result:
+ return result[0].sql
+
+ @reflection.cache
+ def get_columns(self, connection, table_name, schema=None, **kw):
+ info = self._get_table_pragma(
+ connection, "table_info", table_name, schema=schema)
+
+ columns = []
+ for row in info:
+ (name, type_, nullable, default, primary_key) = (
+ row[1], row[2].upper(), not row[3], row[4], row[5])
+
+ columns.append(self._get_column_info(name, type_, nullable,
+ default, primary_key))
+ return columns
+
+ def _get_column_info(self, name, type_, nullable, default, primary_key):
+ coltype = self._resolve_type_affinity(type_)
+
+ if default is not None:
+ default = util.text_type(default)
+
+ return {
+ 'name': name,
+ 'type': coltype,
+ 'nullable': nullable,
+ 'default': default,
+ 'autoincrement': 'auto',
+ 'primary_key': primary_key,
+ }
+
+ def _resolve_type_affinity(self, type_):
+ """Return a data type from a reflected column, using affinity tules.
+
+ SQLite's goal for universal compatibility introduces some complexity
+ during reflection, as a column's defined type might not actually be a
+        type that SQLite understands - or indeed, may not be defined *at all*.
+ Internally, SQLite handles this with a 'data type affinity' for each
+ column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
+ 'REAL', or 'NONE' (raw bits). The algorithm that determines this is
+ listed in http://www.sqlite.org/datatype3.html section 2.1.
+
+ This method allows SQLAlchemy to support that algorithm, while still
+        providing access to smarter reflection utilities by recognizing
+ column definitions that SQLite only supports through affinity (like
+ DATE and DOUBLE).
+
+ """
+ match = re.match(r'([\w ]+)(\(.*?\))?', type_)
+ if match:
+ coltype = match.group(1)
+ args = match.group(2)
+ else:
+ coltype = ''
+ args = ''
+
+ if coltype in self.ischema_names:
+ coltype = self.ischema_names[coltype]
+ elif 'INT' in coltype:
+ coltype = sqltypes.INTEGER
+ elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype:
+ coltype = sqltypes.TEXT
+ elif 'BLOB' in coltype or not coltype:
+ coltype = sqltypes.NullType
+ elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype:
+ coltype = sqltypes.REAL
+ else:
+ coltype = sqltypes.NUMERIC
+
+ if args is not None:
+ args = re.findall(r'(\d+)', args)
+ try:
+ coltype = coltype(*[int(a) for a in args])
+ except TypeError:
+ util.warn(
+ "Could not instantiate type %s with "
+ "reflected arguments %s; using no arguments." %
+ (coltype, args))
+ coltype = coltype()
+ else:
+ coltype = coltype()
+
+ return coltype
+
+ @reflection.cache
+ def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+ constraint_name = None
+ table_data = self._get_table_sql(connection, table_name, schema=schema)
+ if table_data:
+ PK_PATTERN = r'CONSTRAINT (\w+) PRIMARY KEY'
+ result = re.search(PK_PATTERN, table_data, re.I)
+ constraint_name = result.group(1) if result else None
+
+ cols = self.get_columns(connection, table_name, schema, **kw)
+ pkeys = []
+ for col in cols:
+ if col['primary_key']:
+ pkeys.append(col['name'])
+
+ return {'constrained_columns': pkeys, 'name': constraint_name}
+
+ @reflection.cache
+ def get_foreign_keys(self, connection, table_name, schema=None, **kw):
+ # sqlite makes this *extremely difficult*.
+ # First, use the pragma to get the actual FKs.
+ pragma_fks = self._get_table_pragma(
+ connection, "foreign_key_list",
+ table_name, schema=schema
+ )
+
+ fks = {}
+
+ for row in pragma_fks:
+ (numerical_id, rtbl, lcol, rcol) = (
+ row[0], row[2], row[3], row[4])
+
+ if rcol is None:
+ rcol = lcol
+
+ if self._broken_fk_pragma_quotes:
+ rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl)
+
+ if numerical_id in fks:
+ fk = fks[numerical_id]
+ else:
+ fk = fks[numerical_id] = {
+ 'name': None,
+ 'constrained_columns': [],
+ 'referred_schema': schema,
+ 'referred_table': rtbl,
+ 'referred_columns': [],
+ 'options': {}
+ }
+
+ fk['constrained_columns'].append(lcol)
+ fk['referred_columns'].append(rcol)
+
+ def fk_sig(constrained_columns, referred_table, referred_columns):
+ return tuple(constrained_columns) + (referred_table,) + \
+ tuple(referred_columns)
+
+ # then, parse the actual SQL and attempt to find DDL that matches
+ # the names as well. SQLite saves the DDL in whatever format
+        # it was typed in as, so we need to be liberal here.
+
+ keys_by_signature = dict(
+ (
+ fk_sig(
+ fk['constrained_columns'],
+ fk['referred_table'], fk['referred_columns']),
+ fk
+ ) for fk in fks.values()
+ )
+
+ table_data = self._get_table_sql(connection, table_name, schema=schema)
+ if table_data is None:
+ # system tables, etc.
+ return []
+
+ def parse_fks():
+ FK_PATTERN = (
+ r'(?:CONSTRAINT (\w+) +)?'
+ r'FOREIGN KEY *\( *(.+?) *\) +'
+ r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
+ r'((?:ON (?:DELETE|UPDATE) '
+ r'(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)'
+ )
+ for match in re.finditer(FK_PATTERN, table_data, re.I):
+ (
+ constraint_name, constrained_columns,
+ referred_quoted_name, referred_name,
+ referred_columns, onupdatedelete) = \
+ match.group(1, 2, 3, 4, 5, 6)
+ constrained_columns = list(
+ self._find_cols_in_sig(constrained_columns))
+ if not referred_columns:
+ referred_columns = constrained_columns
+ else:
+ referred_columns = list(
+ self._find_cols_in_sig(referred_columns))
+ referred_name = referred_quoted_name or referred_name
+ options = {}
+
+ for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
+ if token.startswith("DELETE"):
+ options['ondelete'] = token[6:].strip()
+ elif token.startswith("UPDATE"):
+ options["onupdate"] = token[6:].strip()
+ yield (
+ constraint_name, constrained_columns,
+ referred_name, referred_columns, options)
+ fkeys = []
+
+ for (
+ constraint_name, constrained_columns,
+ referred_name, referred_columns, options) in parse_fks():
+ sig = fk_sig(
+ constrained_columns, referred_name, referred_columns)
+ if sig not in keys_by_signature:
+ util.warn(
+ "WARNING: SQL-parsed foreign key constraint "
+ "'%s' could not be located in PRAGMA "
+ "foreign_keys for table %s" % (
+ sig,
+ table_name
+ ))
+ continue
+ key = keys_by_signature.pop(sig)
+ key['name'] = constraint_name
+ key['options'] = options
+ fkeys.append(key)
+        # assume the remainder are the unnamed, inline constraints; just
+        # use them as-is, as it's extremely difficult to parse inline
+        # constraints
+ fkeys.extend(keys_by_signature.values())
+ return fkeys
+
+ def _find_cols_in_sig(self, sig):
+ for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
+ yield match.group(1) or match.group(2)
+
+ @reflection.cache
+ def get_unique_constraints(self, connection, table_name,
+ schema=None, **kw):
+
+ auto_index_by_sig = {}
+ for idx in self.get_indexes(
+ connection, table_name, schema=schema,
+ include_auto_indexes=True, **kw):
+ if not idx['name'].startswith("sqlite_autoindex"):
+ continue
+ sig = tuple(idx['column_names'])
+ auto_index_by_sig[sig] = idx
+
+ table_data = self._get_table_sql(
+ connection, table_name, schema=schema, **kw)
+ if not table_data:
+ return []
+
+ unique_constraints = []
+
+ def parse_uqs():
+ UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
+ INLINE_UNIQUE_PATTERN = (
+ r'(?:(".+?")|([a-z0-9]+)) '
+ r'+[a-z0-9_ ]+? +UNIQUE')
+
+ for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
+ name, cols = match.group(1, 2)
+ yield name, list(self._find_cols_in_sig(cols))
+
+ # we need to match inlines as well, as we seek to differentiate
+ # a UNIQUE constraint from a UNIQUE INDEX, even though these
+ # are kind of the same thing :)
+ for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
+ cols = list(
+ self._find_cols_in_sig(match.group(1) or match.group(2)))
+ yield None, cols
+
+ for name, cols in parse_uqs():
+ sig = tuple(cols)
+ if sig in auto_index_by_sig:
+ auto_index_by_sig.pop(sig)
+ parsed_constraint = {
+ 'name': name,
+ 'column_names': cols
+ }
+ unique_constraints.append(parsed_constraint)
+ # NOTE: auto_index_by_sig might not be empty here,
+ # the PRIMARY KEY may have an entry.
+ return unique_constraints
+
+ @reflection.cache
+ def get_check_constraints(self, connection, table_name,
+ schema=None, **kw):
+ table_data = self._get_table_sql(
+ connection, table_name, schema=schema, **kw)
+ if not table_data:
+ return []
+
+ CHECK_PATTERN = (
+ r'(?:CONSTRAINT (\w+) +)?'
+ r'CHECK *\( *(.+) *\),? *'
+ )
+ check_constraints = []
+ # NOTE: we aren't using re.S here because we actually are
+ # taking advantage of each CHECK constraint being all on one
+ # line in the table definition in order to delineate. This
+ # necessarily makes assumptions as to how the CREATE TABLE
+ # was emitted.
+ for match in re.finditer(CHECK_PATTERN, table_data, re.I):
+ check_constraints.append({
+ 'sqltext': match.group(2),
+ 'name': match.group(1)
+ })
+
+ return check_constraints
+
+ @reflection.cache
+ def get_indexes(self, connection, table_name, schema=None, **kw):
+ pragma_indexes = self._get_table_pragma(
+ connection, "index_list", table_name, schema=schema)
+ indexes = []
+
+ include_auto_indexes = kw.pop('include_auto_indexes', False)
+ for row in pragma_indexes:
+ # ignore implicit primary key index.
+ # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
+ if (not include_auto_indexes and
+ row[1].startswith('sqlite_autoindex')):
+ continue
+
+ indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
+
+        # loop through the collected indexes to get their column names.
+ for idx in indexes:
+ pragma_index = self._get_table_pragma(
+ connection, "index_info", idx['name'])
+
+ for row in pragma_index:
+ idx['column_names'].append(row[2])
+ return indexes
+
+ @reflection.cache
+ def _get_table_sql(self, connection, table_name, schema=None, **kw):
+ try:
+ s = ("SELECT sql FROM "
+ " (SELECT * FROM sqlite_master UNION ALL "
+ " SELECT * FROM sqlite_temp_master) "
+ "WHERE name = '%s' "
+ "AND type = 'table'") % table_name
+ rs = connection.execute(s)
+ except exc.DBAPIError:
+ s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
+ "AND type = 'table'") % table_name
+ rs = connection.execute(s)
+ return rs.scalar()
+
+ def _get_table_pragma(self, connection, pragma, table_name, schema=None):
+ quote = self.identifier_preparer.quote_identifier
+ if schema is not None:
+ statement = "PRAGMA %s." % quote(schema)
+ else:
+ statement = "PRAGMA "
+ qtable = quote(table_name)
+ statement = "%s%s(%s)" % (statement, pragma, qtable)
+ cursor = connection.execute(statement)
+ if not cursor._soft_closed:
+ # work around SQLite issue whereby cursor.description
+ # is blank when PRAGMA returns no rows:
+ # http://www.sqlite.org/cvstrac/tktview?tn=1884
+ result = cursor.fetchall()
+ else:
+ result = []
+ return result
diff --git a/app/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/app/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
new file mode 100644
index 0000000..e005d2e
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
@@ -0,0 +1,130 @@
+# sqlite/pysqlcipher.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+#
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: sqlite+pysqlcipher
+ :name: pysqlcipher
+ :dbapi: pysqlcipher
+    :connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
+ :url: https://pypi.python.org/pypi/pysqlcipher
+
+ ``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
+    use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
+
+    ``pysqlcipher3`` is a fork of ``pysqlcipher`` for Python 3. This dialect
+    will attempt to import it if ``pysqlcipher`` is not present.
+
+ .. versionadded:: 1.1.4 - added fallback import for pysqlcipher3
+
+ .. versionadded:: 0.9.9 - added pysqlcipher dialect
+
+Driver
+------
+
+The driver here is the `pysqlcipher
+<https://pypi.python.org/pypi/pysqlcipher>`_
+driver, which makes use of the SQLCipher engine. This system essentially
+introduces new PRAGMA commands to SQLite which allows the setting of a
+passphrase and other encryption parameters, allowing the database
+file to be encrypted.
+
+``pysqlcipher3`` is a fork of ``pysqlcipher`` with support for Python 3;
+the driver is otherwise the same.
+
+Connect Strings
+---------------
+
+The format of the connect string is in every way the same as that
+of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
+"password" field is now accepted, which should contain a passphrase::
+
+ e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
+
+For an absolute file path, two leading slashes should be used for the
+database name::
+
+ e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
+
+A selection of additional encryption-related pragmas supported by SQLCipher
+as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
+in the query string, and will result in that PRAGMA being called for each
+new connection. Currently, ``cipher``, ``kdf_iter``,
+``cipher_page_size`` and ``cipher_use_hmac`` are supported::
+
+ e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
+
+
+Pooling Behavior
+----------------
+
+The driver makes a change to the default pool behavior of pysqlite
+as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
+has been observed to be significantly slower on connection than the
+pysqlite driver, most likely due to the encryption overhead, so the
+dialect here defaults to using the :class:`.SingletonThreadPool`
+implementation,
+instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
+implementation is entirely configurable using the
+:paramref:`.create_engine.poolclass` parameter; the :class:`.StaticPool` may
+be more feasible for single-threaded use, or :class:`.NullPool` may be used
+to prevent unencrypted connections from being held open for long periods of
+time, at the expense of slower startup time for new connections.
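+
+For example, a minimal sketch of opting into :class:`.NullPool` instead, so
+that decrypted connections are not held open between uses::
+
+    from sqlalchemy import create_engine
+    from sqlalchemy.pool import NullPool
+
+    e = create_engine('sqlite+pysqlcipher://:testing@/foo.db',
+                      poolclass=NullPool)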
+
+
+"""
+from __future__ import absolute_import
+from .pysqlite import SQLiteDialect_pysqlite
+from ...engine import url as _url
+from ... import pool
+
+
+class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
+ driver = 'pysqlcipher'
+
+ pragmas = ('kdf_iter', 'cipher', 'cipher_page_size', 'cipher_use_hmac')
+
+ @classmethod
+ def dbapi(cls):
+ try:
+ from pysqlcipher import dbapi2 as sqlcipher
+ except ImportError as e:
+ try:
+ from pysqlcipher3 import dbapi2 as sqlcipher
+ except ImportError:
+ raise e
+ return sqlcipher
+
+ @classmethod
+ def get_pool_class(cls, url):
+ return pool.SingletonThreadPool
+
+ def connect(self, *cargs, **cparams):
+ passphrase = cparams.pop('passphrase', '')
+
+ pragmas = dict(
+ (key, cparams.pop(key, None)) for key in
+ self.pragmas
+ )
+
+ conn = super(SQLiteDialect_pysqlcipher, self).\
+ connect(*cargs, **cparams)
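+        # unlock the database first; SQLCipher requires "PRAGMA key" to be
+        # issued before any other access to the encrypted file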
+ conn.execute('pragma key="%s"' % passphrase)
+ for prag, value in pragmas.items():
+ if value is not None:
+ conn.execute('pragma %s="%s"' % (prag, value))
+
+ return conn
+
+ def create_connect_args(self, url):
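+        # rebuild the URL without the password portion before handing it to
+        # the pysqlite dialect; the passphrase travels separately in the
+        # connect options below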
+ super_url = _url.URL(
+ url.drivername, username=url.username,
+ host=url.host, database=url.database, query=url.query)
+ c_args, opts = super(SQLiteDialect_pysqlcipher, self).\
+ create_connect_args(super_url)
+ opts['passphrase'] = url.password
+ return c_args, opts
+
+dialect = SQLiteDialect_pysqlcipher
diff --git a/app/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/app/lib/sqlalchemy/dialects/sqlite/pysqlite.py
new file mode 100644
index 0000000..40a7cbb
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sqlite/pysqlite.py
@@ -0,0 +1,377 @@
+# sqlite/pysqlite.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+r"""
+.. dialect:: sqlite+pysqlite
+ :name: pysqlite
+ :dbapi: sqlite3
+ :connectstring: sqlite+pysqlite:///file_path
+ :url: http://docs.python.org/library/sqlite3.html
+
+ Note that ``pysqlite`` is the same driver as the ``sqlite3``
+ module included with the Python distribution.
+
+Driver
+------
+
+When using Python 2.5 and above, the built-in ``sqlite3`` driver is
+already installed and no additional installation is needed. Otherwise,
+the ``pysqlite2`` driver needs to be present. This is the same driver as
+``sqlite3``, just with a different name.
+
+The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
+is loaded. This allows an explicitly installed pysqlite driver to take
+precedence over the built-in one. As with all dialects, a specific
+DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
+this explicitly::
+
+ from sqlite3 import dbapi2 as sqlite
+ e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
+
+
+Connect Strings
+---------------
+
+The file specification for the SQLite database is taken as the "database"
+portion of the URL. Note that the format of a SQLAlchemy url is::
+
+ driver://user:pass@host/database
+
+This means that the actual filename to be used starts with the characters to
+the **right** of the third slash. So connecting to a relative filepath
+looks like::
+
+ # relative path
+ e = create_engine('sqlite:///path/to/database.db')
+
+An absolute path, which is denoted by starting with a slash, means you
+need **four** slashes::
+
+ # absolute path
+ e = create_engine('sqlite:////path/to/database.db')
+
+To use a Windows path, regular drive specifications and backslashes can be
+used. Double backslashes are probably needed::
+
+ # absolute path on Windows
+ e = create_engine('sqlite:///C:\\path\\to\\database.db')
+
+The sqlite ``:memory:`` identifier is the default if no filepath is
+present. Specify ``sqlite://`` and nothing else::
+
+ # in-memory database
+ e = create_engine('sqlite://')
+
+Compatibility with sqlite3 "native" date and datetime types
+-----------------------------------------------------------
+
+The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
+sqlite3.PARSE_COLNAMES options, which have the effect that any column
+or expression explicitly cast as "date" or "timestamp" will be converted
+to a Python date or datetime object. The date and datetime types provided
+with the pysqlite dialect are not currently compatible with these options,
+since they render the ISO date/datetime including microseconds, which
+pysqlite's driver does not handle. Additionally, SQLAlchemy does not at
+this time automatically render the "cast" syntax required for the
+freestanding functions "current_timestamp" and "current_date" to return
+datetime/date types natively. Unfortunately, pysqlite
+does not provide the standard DBAPI types in ``cursor.description``,
+leaving SQLAlchemy with no way to detect these types on the fly
+without expensive per-row type checks.
+
+Keeping in mind that pysqlite's parsing option is not recommended,
+nor should it be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
+can be forced if one configures "native_datetime=True" on create_engine()::
+
+ engine = create_engine('sqlite://',
+ connect_args={'detect_types':
+ sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
+ native_datetime=True
+ )
+
+With this flag enabled, the DATE and TIMESTAMP types (but note - not the
+DATETIME or TIME types...confused yet?) will not perform any bind parameter
+or result processing. Execution of "func.current_date()" will return a string.
+"func.current_timestamp()" is registered as returning a DATETIME type in
+SQLAlchemy, so this function still receives SQLAlchemy-level result
+processing.
+
+.. _pysqlite_threading_pooling:
+
+Threading/Pooling Behavior
+---------------------------
+
+Pysqlite's default behavior is to prohibit the usage of a single connection
+in more than one thread. This is originally intended to work with older
+versions of SQLite that did not support multithreaded operation under
+various circumstances. In particular, older SQLite versions
+did not allow a ``:memory:`` database to be used in multiple threads
+under any circumstances.
+
+Pysqlite does include a now-undocumented flag known as
+``check_same_thread`` which will disable this check; however, note that
+pysqlite connections are still not safe to use concurrently in multiple
+threads. In particular, any statement execution calls would need to be
+externally mutexed, as Pysqlite does not provide for thread-safe propagation
+of error messages among other things. So while even ``:memory:`` databases
+can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
+thread-safety to make this usage worth it.
+
+SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
+
+* When a ``:memory:`` SQLite database is specified, the dialect by default
+ will use :class:`.SingletonThreadPool`. This pool maintains a single
+ connection per thread, so that all access to the engine within the current
+  thread uses the same ``:memory:`` database - other threads would access a
+ different ``:memory:`` database.
+* When a file-based database is specified, the dialect will use
+ :class:`.NullPool` as the source of connections. This pool closes and
+ discards connections which are returned to the pool immediately. SQLite
+ file-based connections have extremely low overhead, so pooling is not
+ necessary. The scheme also prevents a connection from being used again in
+ a different thread and works best with SQLite's coarse-grained file locking.
+
+ .. versionchanged:: 0.7
+ Default selection of :class:`.NullPool` for SQLite file-based databases.
+    Previous versions selected :class:`.SingletonThreadPool` by
+ default for all SQLite databases.
+
+
+Using a Memory Database in Multiple Threads
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To use a ``:memory:`` database in a multithreaded scenario, the same
+connection object must be shared among threads, since the database exists
+only within the scope of that connection. The
+:class:`.StaticPool` implementation will maintain a single connection
+globally, and the ``check_same_thread`` flag can be passed to Pysqlite
+as ``False``::
+
+ from sqlalchemy.pool import StaticPool
+ engine = create_engine('sqlite://',
+ connect_args={'check_same_thread':False},
+ poolclass=StaticPool)
+
+Note that using a ``:memory:`` database in multiple threads requires a recent
+version of SQLite.
+
+Using Temporary Tables with SQLite
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Due to the way SQLite deals with temporary tables, if you wish to use a
+temporary table in a file-based SQLite database across multiple checkouts
+from the connection pool, such as when using an ORM :class:`.Session` where
+the temporary table should continue to remain after :meth:`.Session.commit` or
+:meth:`.Session.rollback` is called, a pool which maintains a single
+connection must be used. Use :class:`.SingletonThreadPool` if the scope is
+only needed within the current thread, or :class:`.StaticPool` if the scope is
+needed within multiple threads for this case::
+
+ # maintain the same connection per thread
+ from sqlalchemy.pool import SingletonThreadPool
+ engine = create_engine('sqlite:///mydb.db',
+ poolclass=SingletonThreadPool)
+
+
+ # maintain the same connection across all threads
+ from sqlalchemy.pool import StaticPool
+ engine = create_engine('sqlite:///mydb.db',
+ poolclass=StaticPool)
+
+Note that :class:`.SingletonThreadPool` should be configured for the number
+of threads that are to be used; beyond that number, connections will be
+closed out in a non-deterministic way.
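+
+For example (a sketch; ``create_engine()`` forwards ``pool_size`` to the
+pool's constructor)::
+
+    from sqlalchemy import create_engine
+    from sqlalchemy.pool import SingletonThreadPool
+
+    engine = create_engine('sqlite:///mydb.db',
+                           poolclass=SingletonThreadPool,
+                           pool_size=10)  # sized for ten threads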
+
+Unicode
+-------
+
+The pysqlite driver only returns Python ``unicode`` objects in result sets,
+never plain strings, and accommodates ``unicode`` objects within bound
+parameter values in all cases. Regardless of the SQLAlchemy string type in
+use, string-based result values will be Python ``unicode`` in Python 2.
+The :class:`.Unicode` type should still be used to indicate those columns that
+require unicode, however, so that non-``unicode`` values passed inadvertently
+will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
+is passed containing non-ASCII characters.
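+
+A minimal sketch of declaring such a column (the table and column names are
+hypothetical) so that accidental byte-string values emit a warning::
+
+    from sqlalchemy import Table, Column, MetaData, Unicode
+
+    metadata = MetaData()
+    users = Table('users', metadata,
+                  Column('name', Unicode(50)))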
+
+.. _pysqlite_serializable:
+
+Serializable isolation / Savepoints / Transactional DDL
+-------------------------------------------------------
+
+In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
+driver's assortment of issues that prevent several features of SQLite
+from working correctly. The pysqlite DBAPI driver has several
+long-standing bugs which impact the correctness of its transactional
+behavior. In its default mode of operation, SQLite features such as
+SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
+non-functional, and in order to use these features, workarounds must
+be taken.
+
+The issue is essentially that the driver attempts to second-guess the user's
+intent, failing to start transactions and sometimes ending them prematurely, in
+an effort to minimize the SQLite database's file locking behavior, even
+though SQLite itself uses "shared" locks for read-only activities.
+
+SQLAlchemy chooses to not alter this behavior by default, as it is the
+long-expected behavior of the pysqlite driver; if and when the pysqlite
+driver repairs these issues, that will help drive changes to SQLAlchemy's
+defaults as well.
+
+The good news is that with a few events, we can implement transactional
+support fully, by disabling pysqlite's feature entirely and emitting BEGIN
+ourselves. This is achieved using two event listeners::
+
+ from sqlalchemy import create_engine, event
+
+ engine = create_engine("sqlite:///myfile.db")
+
+ @event.listens_for(engine, "connect")
+ def do_connect(dbapi_connection, connection_record):
+ # disable pysqlite's emitting of the BEGIN statement entirely.
+ # also stops it from emitting COMMIT before any DDL.
+ dbapi_connection.isolation_level = None
+
+ @event.listens_for(engine, "begin")
+ def do_begin(conn):
+ # emit our own BEGIN
+ conn.execute("BEGIN")
+
+Above, we intercept a new pysqlite connection and disable any transactional
+integration. Then, at the point at which SQLAlchemy knows that transaction
+scope is to begin, we emit ``"BEGIN"`` ourselves.
+
+When we take control of ``"BEGIN"``, we can also control directly SQLite's
+locking modes, introduced at `BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_,
+by adding the desired locking mode to our ``"BEGIN"``::
+
+ @event.listens_for(engine, "begin")
+ def do_begin(conn):
+ conn.execute("BEGIN EXCLUSIVE")
+
+.. seealso::
+
+    `BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_ - on the SQLite site
+
+    `sqlite3 SELECT does not BEGIN a transaction <http://bugs.python.org/issue9924>`_ - on the Python bug tracker
+
+    `sqlite3 module breaks transactions and potentially corrupts data <http://bugs.python.org/issue10740>`_ - on the Python bug tracker
+
+
+"""
+
+from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
+from sqlalchemy import exc, pool
+from sqlalchemy import types as sqltypes
+from sqlalchemy import util
+
+import os
+
+
+class _SQLite_pysqliteTimeStamp(DATETIME):
+ def bind_processor(self, dialect):
+ if dialect.native_datetime:
+ return None
+ else:
+ return DATETIME.bind_processor(self, dialect)
+
+ def result_processor(self, dialect, coltype):
+ if dialect.native_datetime:
+ return None
+ else:
+ return DATETIME.result_processor(self, dialect, coltype)
+
+
+class _SQLite_pysqliteDate(DATE):
+ def bind_processor(self, dialect):
+ if dialect.native_datetime:
+ return None
+ else:
+ return DATE.bind_processor(self, dialect)
+
+ def result_processor(self, dialect, coltype):
+ if dialect.native_datetime:
+ return None
+ else:
+ return DATE.result_processor(self, dialect, coltype)
+
+
+class SQLiteDialect_pysqlite(SQLiteDialect):
+ default_paramstyle = 'qmark'
+
+ colspecs = util.update_copy(
+ SQLiteDialect.colspecs,
+ {
+ sqltypes.Date: _SQLite_pysqliteDate,
+ sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
+ }
+ )
+
+ if not util.py2k:
+ description_encoding = None
+
+ driver = 'pysqlite'
+
+ def __init__(self, **kwargs):
+ SQLiteDialect.__init__(self, **kwargs)
+
+ if self.dbapi is not None:
+ sqlite_ver = self.dbapi.version_info
+ if sqlite_ver < (2, 1, 3):
+ util.warn(
+                    ("The installed version of pysqlite2 (%s) is outdated "
+ "and will cause errors in some cases. Version 2.1.3 "
+ "or greater is recommended.") %
+ '.'.join([str(subver) for subver in sqlite_ver]))
+
+ @classmethod
+ def dbapi(cls):
+ try:
+ from pysqlite2 import dbapi2 as sqlite
+ except ImportError as e:
+ try:
+ from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name.
+ except ImportError:
+ raise e
+ return sqlite
+
+ @classmethod
+ def get_pool_class(cls, url):
+ if url.database and url.database != ':memory:':
+ return pool.NullPool
+ else:
+ return pool.SingletonThreadPool
+
+ def _get_server_version_info(self, connection):
+ return self.dbapi.sqlite_version_info
+
+ def create_connect_args(self, url):
+ if url.username or url.password or url.host or url.port:
+ raise exc.ArgumentError(
+ "Invalid SQLite URL: %s\n"
+ "Valid SQLite URL forms are:\n"
+ " sqlite:///:memory: (or, sqlite://)\n"
+ " sqlite:///relative/path/to/file.db\n"
+ " sqlite:////absolute/path/to/file.db" % (url,))
+ filename = url.database or ':memory:'
+ if filename != ':memory:':
+ filename = os.path.abspath(filename)
+
+ opts = url.query.copy()
+ util.coerce_kw_type(opts, 'timeout', float)
+ util.coerce_kw_type(opts, 'isolation_level', str)
+ util.coerce_kw_type(opts, 'detect_types', int)
+ util.coerce_kw_type(opts, 'check_same_thread', bool)
+ util.coerce_kw_type(opts, 'cached_statements', int)
+
+ return ([filename], opts)
+
+ def is_disconnect(self, e, connection, cursor):
+ return isinstance(e, self.dbapi.ProgrammingError) and \
+ "Cannot operate on a closed database." in str(e)
+
+dialect = SQLiteDialect_pysqlite
diff --git a/app/lib/sqlalchemy/dialects/sybase/__init__.py b/app/lib/sqlalchemy/dialects/sybase/__init__.py
new file mode 100644
index 0000000..1e72790
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/__init__.py
@@ -0,0 +1,28 @@
+# sybase/__init__.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from sqlalchemy.dialects.sybase import base, pysybase, pyodbc
+
+# default dialect
+base.dialect = pyodbc.dialect
+
+from .base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
+ TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
+ BIGINT, INT, INTEGER, SMALLINT, BINARY,\
+ VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\
+ IMAGE, BIT, MONEY, SMALLMONEY, TINYINT,\
+ dialect
+
+
+__all__ = (
+ 'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR',
+ 'TEXT', 'DATE', 'DATETIME', 'FLOAT', 'NUMERIC',
+ 'BIGINT', 'INT', 'INTEGER', 'SMALLINT', 'BINARY',
+ 'VARBINARY', 'UNITEXT', 'UNICHAR', 'UNIVARCHAR',
+ 'IMAGE', 'BIT', 'MONEY', 'SMALLMONEY', 'TINYINT',
+ 'dialect'
+)
diff --git a/app/lib/sqlalchemy/dialects/sybase/base.py b/app/lib/sqlalchemy/dialects/sybase/base.py
new file mode 100644
index 0000000..5d2f0f7
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/base.py
@@ -0,0 +1,821 @@
+# sybase/base.py
+# Copyright (C) 2010-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# get_select_precolumns(), limit_clause() implementation
+# copyright (C) 2007 Fisch Asset Management
+# AG http://www.fam.ch, with coding by Alexander Houben
+# alexander.houben@thor-solutions.ch
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+
+.. dialect:: sybase
+ :name: Sybase
+
+.. note::
+
+ The Sybase dialect functions on current SQLAlchemy versions
+ but is not regularly tested, and may have many issues and
+ caveats not currently handled.
+
+"""
+import operator
+import re
+
+from sqlalchemy.sql import compiler, expression, text, bindparam
+from sqlalchemy.engine import default, base, reflection
+from sqlalchemy import types as sqltypes
+from sqlalchemy.sql import operators as sql_operators
+from sqlalchemy import schema as sa_schema
+from sqlalchemy import util, sql, exc
+
+from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
+ TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
+ BIGINT, INT, INTEGER, SMALLINT, BINARY,\
+ VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
+ UnicodeText, REAL
+
+RESERVED_WORDS = set([
+ "add", "all", "alter", "and",
+ "any", "as", "asc", "backup",
+ "begin", "between", "bigint", "binary",
+ "bit", "bottom", "break", "by",
+ "call", "capability", "cascade", "case",
+ "cast", "char", "char_convert", "character",
+ "check", "checkpoint", "close", "comment",
+ "commit", "connect", "constraint", "contains",
+ "continue", "convert", "create", "cross",
+ "cube", "current", "current_timestamp", "current_user",
+ "cursor", "date", "dbspace", "deallocate",
+ "dec", "decimal", "declare", "default",
+ "delete", "deleting", "desc", "distinct",
+ "do", "double", "drop", "dynamic",
+ "else", "elseif", "encrypted", "end",
+ "endif", "escape", "except", "exception",
+ "exec", "execute", "existing", "exists",
+ "externlogin", "fetch", "first", "float",
+ "for", "force", "foreign", "forward",
+ "from", "full", "goto", "grant",
+ "group", "having", "holdlock", "identified",
+ "if", "in", "index", "index_lparen",
+ "inner", "inout", "insensitive", "insert",
+ "inserting", "install", "instead", "int",
+ "integer", "integrated", "intersect", "into",
+ "iq", "is", "isolation", "join",
+ "key", "lateral", "left", "like",
+ "lock", "login", "long", "match",
+ "membership", "message", "mode", "modify",
+ "natural", "new", "no", "noholdlock",
+ "not", "notify", "null", "numeric",
+ "of", "off", "on", "open",
+ "option", "options", "or", "order",
+ "others", "out", "outer", "over",
+ "passthrough", "precision", "prepare", "primary",
+ "print", "privileges", "proc", "procedure",
+ "publication", "raiserror", "readtext", "real",
+ "reference", "references", "release", "remote",
+ "remove", "rename", "reorganize", "resource",
+ "restore", "restrict", "return", "revoke",
+ "right", "rollback", "rollup", "save",
+ "savepoint", "scroll", "select", "sensitive",
+ "session", "set", "setuser", "share",
+ "smallint", "some", "sqlcode", "sqlstate",
+ "start", "stop", "subtrans", "subtransaction",
+ "synchronize", "syntax_error", "table", "temporary",
+ "then", "time", "timestamp", "tinyint",
+ "to", "top", "tran", "trigger",
+ "truncate", "tsequal", "unbounded", "union",
+ "unique", "unknown", "unsigned", "update",
+ "updating", "user", "using", "validate",
+ "values", "varbinary", "varchar", "variable",
+ "varying", "view", "wait", "waitfor",
+ "when", "where", "while", "window",
+ "with", "with_cube", "with_lparen", "with_rollup",
+ "within", "work", "writetext",
+])
+
+
+class _SybaseUnitypeMixin(object):
+ """these types appear to return a buffer object."""
+
+ def result_processor(self, dialect, coltype):
+ def process(value):
+ if value is not None:
+ return str(value) # decode("ucs-2")
+ else:
+ return None
+ return process
+
+
+class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
+ __visit_name__ = 'UNICHAR'
+
+
+class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
+ __visit_name__ = 'UNIVARCHAR'
+
+
+class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
+ __visit_name__ = 'UNITEXT'
+
+
+class TINYINT(sqltypes.Integer):
+ __visit_name__ = 'TINYINT'
+
+
+class BIT(sqltypes.TypeEngine):
+ __visit_name__ = 'BIT'
+
+
+class MONEY(sqltypes.TypeEngine):
+ __visit_name__ = "MONEY"
+
+
+class SMALLMONEY(sqltypes.TypeEngine):
+ __visit_name__ = "SMALLMONEY"
+
+
+class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
+ __visit_name__ = "UNIQUEIDENTIFIER"
+
+
+class IMAGE(sqltypes.LargeBinary):
+ __visit_name__ = 'IMAGE'
+
+
+class SybaseTypeCompiler(compiler.GenericTypeCompiler):
+ def visit_large_binary(self, type_, **kw):
+ return self.visit_IMAGE(type_)
+
+ def visit_boolean(self, type_, **kw):
+ return self.visit_BIT(type_)
+
+ def visit_unicode(self, type_, **kw):
+ return self.visit_NVARCHAR(type_)
+
+ def visit_UNICHAR(self, type_, **kw):
+ return "UNICHAR(%d)" % type_.length
+
+ def visit_UNIVARCHAR(self, type_, **kw):
+ return "UNIVARCHAR(%d)" % type_.length
+
+ def visit_UNITEXT(self, type_, **kw):
+ return "UNITEXT"
+
+ def visit_TINYINT(self, type_, **kw):
+ return "TINYINT"
+
+ def visit_IMAGE(self, type_, **kw):
+ return "IMAGE"
+
+ def visit_BIT(self, type_, **kw):
+ return "BIT"
+
+ def visit_MONEY(self, type_, **kw):
+ return "MONEY"
+
+ def visit_SMALLMONEY(self, type_, **kw):
+ return "SMALLMONEY"
+
+ def visit_UNIQUEIDENTIFIER(self, type_, **kw):
+ return "UNIQUEIDENTIFIER"
+
+ischema_names = {
+ 'bigint': BIGINT,
+ 'int': INTEGER,
+ 'integer': INTEGER,
+ 'smallint': SMALLINT,
+ 'tinyint': TINYINT,
+ 'unsigned bigint': BIGINT, # TODO: unsigned flags
+ 'unsigned int': INTEGER, # TODO: unsigned flags
+ 'unsigned smallint': SMALLINT, # TODO: unsigned flags
+ 'numeric': NUMERIC,
+ 'decimal': DECIMAL,
+ 'dec': DECIMAL,
+ 'float': FLOAT,
+ 'double': NUMERIC, # TODO
+ 'double precision': NUMERIC, # TODO
+ 'real': REAL,
+ 'smallmoney': SMALLMONEY,
+ 'money': MONEY,
+ 'smalldatetime': DATETIME,
+ 'datetime': DATETIME,
+ 'date': DATE,
+ 'time': TIME,
+ 'char': CHAR,
+ 'character': CHAR,
+ 'varchar': VARCHAR,
+ 'character varying': VARCHAR,
+ 'char varying': VARCHAR,
+ 'unichar': UNICHAR,
+ 'unicode character': UNIVARCHAR,
+ 'nchar': NCHAR,
+ 'national char': NCHAR,
+ 'national character': NCHAR,
+ 'nvarchar': NVARCHAR,
+ 'nchar varying': NVARCHAR,
+ 'national char varying': NVARCHAR,
+ 'national character varying': NVARCHAR,
+ 'text': TEXT,
+ 'unitext': UNITEXT,
+ 'binary': BINARY,
+ 'varbinary': VARBINARY,
+ 'image': IMAGE,
+ 'bit': BIT,
+
+ # not in documentation for ASE 15.7
+ 'long varchar': TEXT, # TODO
+ 'timestamp': TIMESTAMP,
+ 'uniqueidentifier': UNIQUEIDENTIFIER,
+
+}
+
+
+class SybaseInspector(reflection.Inspector):
+
+ def __init__(self, conn):
+ reflection.Inspector.__init__(self, conn)
+
+ def get_table_id(self, table_name, schema=None):
+ """Return the table id from `table_name` and `schema`."""
+
+ return self.dialect.get_table_id(self.bind, table_name, schema,
+ info_cache=self.info_cache)
+
+
+class SybaseExecutionContext(default.DefaultExecutionContext):
+ _enable_identity_insert = False
+
+ def set_ddl_autocommit(self, connection, value):
+ """Must be implemented by subclasses to accommodate DDL executions.
+
+        "connection" is the raw unwrapped DBAPI connection. "value"
+        is True or False. When True, the connection should be configured
+        such that a DDL can take place subsequently. When False,
+        a DDL has taken place and the connection should be resumed
+        into non-autocommit mode.
+
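+        A minimal sketch, assuming a DBAPI connection that exposes an
+        ``autocommit`` attribute (the pyodbc subclass in this dialect
+        does exactly this)::
+
+            def set_ddl_autocommit(self, connection, value):
+                connection.autocommit = value
+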
+ """
+ raise NotImplementedError()
+
+ def pre_exec(self):
+ if self.isinsert:
+ tbl = self.compiled.statement.table
+ seq_column = tbl._autoincrement_column
+ insert_has_sequence = seq_column is not None
+
+ if insert_has_sequence:
+ self._enable_identity_insert = \
+ seq_column.key in self.compiled_parameters[0]
+ else:
+ self._enable_identity_insert = False
+
+ if self._enable_identity_insert:
+ self.cursor.execute(
+ "SET IDENTITY_INSERT %s ON" %
+ self.dialect.identifier_preparer.format_table(tbl))
+
+ if self.isddl:
+ # TODO: to enhance this, we can detect "ddl in tran" on the
+ # database settings. this error message should be improved to
+ # include a note about that.
+ if not self.should_autocommit:
+ raise exc.InvalidRequestError(
+ "The Sybase dialect only supports "
+ "DDL in 'autocommit' mode at this time.")
+
+ self.root_connection.engine.logger.info(
+ "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
+
+ self.set_ddl_autocommit(
+ self.root_connection.connection.connection,
+ True)
+
+ def post_exec(self):
+ if self.isddl:
+ self.set_ddl_autocommit(self.root_connection, False)
+
+ if self._enable_identity_insert:
+ self.cursor.execute(
+ "SET IDENTITY_INSERT %s OFF" %
+ self.dialect.identifier_preparer.
+ format_table(self.compiled.statement.table)
+ )
+
+ def get_lastrowid(self):
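+        # Sybase exposes the most recently generated IDENTITY value
+        # through the @@identity global variable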
+ cursor = self.create_cursor()
+ cursor.execute("SELECT @@identity AS lastrowid")
+ lastrowid = cursor.fetchone()[0]
+ cursor.close()
+ return lastrowid
+
+
+class SybaseSQLCompiler(compiler.SQLCompiler):
+ ansi_bind_rules = True
+
+ extract_map = util.update_copy(
+ compiler.SQLCompiler.extract_map,
+ {
+ 'doy': 'dayofyear',
+ 'dow': 'weekday',
+ 'milliseconds': 'millisecond'
+ })
+
+ def get_select_precolumns(self, select, **kw):
+ s = select._distinct and "DISTINCT " or ""
+ # TODO: don't think Sybase supports
+ # bind params for FIRST / TOP
+ limit = select._limit
+ if limit:
+ # if select._limit == 1:
+ # s += "FIRST "
+ # else:
+ # s += "TOP %s " % (select._limit,)
+ s += "TOP %s " % (limit,)
+ offset = select._offset
+ if offset:
+ raise NotImplementedError("Sybase ASE does not support OFFSET")
+ return s
+
+ def get_from_hint_text(self, table, text):
+ return text
+
+ def limit_clause(self, select, **kw):
+ # Limit in sybase is after the select keyword
+ return ""
+
+ def visit_extract(self, extract, **kw):
+ field = self.extract_map.get(extract.field, extract.field)
+ return 'DATEPART("%s", %s)' % (
+ field, self.process(extract.expr, **kw))
+
+ def visit_now_func(self, fn, **kw):
+ return "GETDATE()"
+
+ def for_update_clause(self, select):
+ # "FOR UPDATE" is only allowed on "DECLARE CURSOR"
+ # which SQLAlchemy doesn't use
+ return ''
+
+ def order_by_clause(self, select, **kw):
+ kw['literal_binds'] = True
+ order_by = self.process(select._order_by_clause, **kw)
+
+ # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
+ if order_by and (not self.is_subquery() or select._limit):
+ return " ORDER BY " + order_by
+ else:
+ return ""
+
+
+class SybaseDDLCompiler(compiler.DDLCompiler):
+ def get_column_specification(self, column, **kwargs):
+ colspec = self.preparer.format_column(column) + " " + \
+ self.dialect.type_compiler.process(
+ column.type, type_expression=column)
+
+ if column.table is None:
+ raise exc.CompileError(
+ "The Sybase dialect requires Table-bound "
+ "columns in order to generate DDL")
+ seq_col = column.table._autoincrement_column
+
+        # install an IDENTITY Sequence if we have an implicit IDENTITY column
+ if seq_col is column:
+ sequence = isinstance(column.default, sa_schema.Sequence) \
+ and column.default
+ if sequence:
+ start, increment = sequence.start or 1, \
+ sequence.increment or 1
+ else:
+ start, increment = 1, 1
+ if (start, increment) == (1, 1):
+ colspec += " IDENTITY"
+ else:
+ # TODO: need correct syntax for this
+ colspec += " IDENTITY(%s,%s)" % (start, increment)
+ else:
+ default = self.get_column_default_string(column)
+ if default is not None:
+ colspec += " DEFAULT " + default
+
+ if column.nullable is not None:
+ if not column.nullable or column.primary_key:
+ colspec += " NOT NULL"
+ else:
+ colspec += " NULL"
+
+ return colspec
+
+ def visit_drop_index(self, drop):
+ index = drop.element
+ return "\nDROP INDEX %s.%s" % (
+ self.preparer.quote_identifier(index.table.name),
+ self._prepared_index_name(drop.element,
+ include_schema=False)
+ )
+
+
+class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
+ reserved_words = RESERVED_WORDS
+
+
+class SybaseDialect(default.DefaultDialect):
+ name = 'sybase'
+ supports_unicode_statements = False
+ supports_sane_rowcount = False
+ supports_sane_multi_rowcount = False
+
+ supports_native_boolean = False
+ supports_unicode_binds = False
+ postfetch_lastrowid = True
+
+ colspecs = {}
+ ischema_names = ischema_names
+
+ type_compiler = SybaseTypeCompiler
+ statement_compiler = SybaseSQLCompiler
+ ddl_compiler = SybaseDDLCompiler
+ preparer = SybaseIdentifierPreparer
+ inspector = SybaseInspector
+
+ construct_arguments = []
+
+ def _get_default_schema_name(self, connection):
+ return connection.scalar(
+ text("SELECT user_name() as user_name",
+ typemap={'user_name': Unicode})
+ )
+
+ def initialize(self, connection):
+ super(SybaseDialect, self).initialize(connection)
+ if self.server_version_info is not None and\
+ self.server_version_info < (15, ):
+ self.max_identifier_length = 30
+ else:
+ self.max_identifier_length = 255
+
+ def get_table_id(self, connection, table_name, schema=None, **kw):
+ """Fetch the id for schema.table_name.
+
+ Several reflection methods require the table id. The idea for using
+ this method is that it can be fetched one time and cached for
+ subsequent calls.
+
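+        E.g., via the :class:`.SybaseInspector` (a sketch; the URL and
+        table name here are hypothetical)::
+
+            from sqlalchemy import create_engine, inspect
+
+            insp = inspect(create_engine('sybase+pyodbc://user:pass@mydsn'))
+            table_id = insp.get_table_id('my_table', schema='dbo')
+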
+ """
+
+ table_id = None
+ if schema is None:
+ schema = self.default_schema_name
+
+ TABLEID_SQL = text("""
+ SELECT o.id AS id
+ FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
+ WHERE u.name = :schema_name
+ AND o.name = :table_name
+ AND o.type in ('U', 'V')
+ """)
+
+ if util.py2k:
+ if isinstance(schema, unicode):
+ schema = schema.encode("ascii")
+ if isinstance(table_name, unicode):
+ table_name = table_name.encode("ascii")
+ result = connection.execute(TABLEID_SQL,
+ schema_name=schema,
+ table_name=table_name)
+ table_id = result.scalar()
+ if table_id is None:
+ raise exc.NoSuchTableError(table_name)
+ return table_id
+
+ @reflection.cache
+ def get_columns(self, connection, table_name, schema=None, **kw):
+ table_id = self.get_table_id(connection, table_name, schema,
+ info_cache=kw.get("info_cache"))
+
+ COLUMN_SQL = text("""
+ SELECT col.name AS name,
+ t.name AS type,
+ (col.status & 8) AS nullable,
+ (col.status & 128) AS autoincrement,
+ com.text AS 'default',
+ col.prec AS precision,
+ col.scale AS scale,
+ col.length AS length
+ FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
+ col.cdefault = com.id
+ WHERE col.usertype = t.usertype
+ AND col.id = :table_id
+ ORDER BY col.colid
+ """)
+
+ results = connection.execute(COLUMN_SQL, table_id=table_id)
+
+ columns = []
+ for (name, type_, nullable, autoincrement, default, precision, scale,
+ length) in results:
+ col_info = self._get_column_info(name, type_, bool(nullable),
+ bool(autoincrement),
+ default, precision, scale,
+ length)
+ columns.append(col_info)
+
+ return columns
+
+ def _get_column_info(self, name, type_, nullable, autoincrement, default,
+ precision, scale, length):
+
+ coltype = self.ischema_names.get(type_, None)
+
+ kwargs = {}
+
+ if coltype in (NUMERIC, DECIMAL):
+ args = (precision, scale)
+ elif coltype == FLOAT:
+ args = (precision,)
+ elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
+ args = (length,)
+ else:
+ args = ()
+
+ if coltype:
+ coltype = coltype(*args, **kwargs)
+ # is this necessary
+ # if is_array:
+ # coltype = ARRAY(coltype)
+ else:
+ util.warn("Did not recognize type '%s' of column '%s'" %
+ (type_, name))
+ coltype = sqltypes.NULLTYPE
+
+ if default:
+ default = default.replace("DEFAULT", "").strip()
+ default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
+ else:
+ default = None
+
+ column_info = dict(name=name, type=coltype, nullable=nullable,
+ default=default, autoincrement=autoincrement)
+ return column_info
+
+ @reflection.cache
+ def get_foreign_keys(self, connection, table_name, schema=None, **kw):
+
+ table_id = self.get_table_id(connection, table_name, schema,
+ info_cache=kw.get("info_cache"))
+
+ table_cache = {}
+ column_cache = {}
+ foreign_keys = []
+
+ table_cache[table_id] = {"name": table_name, "schema": schema}
+
+ COLUMN_SQL = text("""
+ SELECT c.colid AS id, c.name AS name
+ FROM syscolumns c
+ WHERE c.id = :table_id
+ """)
+
+ results = connection.execute(COLUMN_SQL, table_id=table_id)
+ columns = {}
+ for col in results:
+ columns[col["id"]] = col["name"]
+ column_cache[table_id] = columns
+
+ REFCONSTRAINT_SQL = text("""
+ SELECT o.name AS name, r.reftabid AS reftable_id,
+ r.keycnt AS 'count',
+ r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
+ r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
+           r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
+ r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
+ r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
+ r.fokey16 AS fokey16,
+ r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
+ r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
+           r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
+ r.refkey10 AS refkey10, r.refkey11 AS refkey11,
+ r.refkey12 AS refkey12, r.refkey13 AS refkey13,
+ r.refkey14 AS refkey14, r.refkey15 AS refkey15,
+ r.refkey16 AS refkey16
+ FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
+ WHERE r.tableid = :table_id
+ """)
+ referential_constraints = connection.execute(
+ REFCONSTRAINT_SQL, table_id=table_id).fetchall()
+
+ REFTABLE_SQL = text("""
+ SELECT o.name AS name, u.name AS 'schema'
+ FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
+ WHERE o.id = :table_id
+ """)
+
+ for r in referential_constraints:
+ reftable_id = r["reftable_id"]
+
+ if reftable_id not in table_cache:
+ c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
+ reftable = c.fetchone()
+ c.close()
+ table_info = {"name": reftable["name"], "schema": None}
+ if (schema is not None or
+ reftable["schema"] != self.default_schema_name):
+ table_info["schema"] = reftable["schema"]
+
+ table_cache[reftable_id] = table_info
+ results = connection.execute(COLUMN_SQL, table_id=reftable_id)
+ reftable_columns = {}
+ for col in results:
+ reftable_columns[col["id"]] = col["name"]
+ column_cache[reftable_id] = reftable_columns
+
+ reftable = table_cache[reftable_id]
+ reftable_columns = column_cache[reftable_id]
+
+ constrained_columns = []
+ referred_columns = []
+ for i in range(1, r["count"] + 1):
+ constrained_columns.append(columns[r["fokey%i" % i]])
+ referred_columns.append(reftable_columns[r["refkey%i" % i]])
+
+ fk_info = {
+ "constrained_columns": constrained_columns,
+ "referred_schema": reftable["schema"],
+ "referred_table": reftable["name"],
+ "referred_columns": referred_columns,
+ "name": r["name"]
+ }
+
+ foreign_keys.append(fk_info)
+
+ return foreign_keys
+
+ @reflection.cache
+ def get_indexes(self, connection, table_name, schema=None, **kw):
+ table_id = self.get_table_id(connection, table_name, schema,
+ info_cache=kw.get("info_cache"))
+
+ INDEX_SQL = text("""
+ SELECT object_name(i.id) AS table_name,
+ i.keycnt AS 'count',
+ i.name AS name,
+ (i.status & 0x2) AS 'unique',
+ index_col(object_name(i.id), i.indid, 1) AS col_1,
+ index_col(object_name(i.id), i.indid, 2) AS col_2,
+ index_col(object_name(i.id), i.indid, 3) AS col_3,
+ index_col(object_name(i.id), i.indid, 4) AS col_4,
+ index_col(object_name(i.id), i.indid, 5) AS col_5,
+ index_col(object_name(i.id), i.indid, 6) AS col_6,
+ index_col(object_name(i.id), i.indid, 7) AS col_7,
+ index_col(object_name(i.id), i.indid, 8) AS col_8,
+ index_col(object_name(i.id), i.indid, 9) AS col_9,
+ index_col(object_name(i.id), i.indid, 10) AS col_10,
+ index_col(object_name(i.id), i.indid, 11) AS col_11,
+ index_col(object_name(i.id), i.indid, 12) AS col_12,
+ index_col(object_name(i.id), i.indid, 13) AS col_13,
+ index_col(object_name(i.id), i.indid, 14) AS col_14,
+ index_col(object_name(i.id), i.indid, 15) AS col_15,
+ index_col(object_name(i.id), i.indid, 16) AS col_16
+ FROM sysindexes i, sysobjects o
+ WHERE o.id = i.id
+ AND o.id = :table_id
+ AND (i.status & 2048) = 0
+ AND i.indid BETWEEN 1 AND 254
+ """)
+
+ results = connection.execute(INDEX_SQL, table_id=table_id)
+ indexes = []
+ for r in results:
+ column_names = []
+ for i in range(1, r["count"]):
+ column_names.append(r["col_%i" % (i,)])
+ index_info = {"name": r["name"],
+ "unique": bool(r["unique"]),
+ "column_names": column_names}
+ indexes.append(index_info)
+
+ return indexes
+
+ @reflection.cache
+ def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+ table_id = self.get_table_id(connection, table_name, schema,
+ info_cache=kw.get("info_cache"))
+
+ PK_SQL = text("""
+ SELECT object_name(i.id) AS table_name,
+ i.keycnt AS 'count',
+ i.name AS name,
+ index_col(object_name(i.id), i.indid, 1) AS pk_1,
+ index_col(object_name(i.id), i.indid, 2) AS pk_2,
+ index_col(object_name(i.id), i.indid, 3) AS pk_3,
+ index_col(object_name(i.id), i.indid, 4) AS pk_4,
+ index_col(object_name(i.id), i.indid, 5) AS pk_5,
+ index_col(object_name(i.id), i.indid, 6) AS pk_6,
+ index_col(object_name(i.id), i.indid, 7) AS pk_7,
+ index_col(object_name(i.id), i.indid, 8) AS pk_8,
+ index_col(object_name(i.id), i.indid, 9) AS pk_9,
+ index_col(object_name(i.id), i.indid, 10) AS pk_10,
+ index_col(object_name(i.id), i.indid, 11) AS pk_11,
+ index_col(object_name(i.id), i.indid, 12) AS pk_12,
+ index_col(object_name(i.id), i.indid, 13) AS pk_13,
+ index_col(object_name(i.id), i.indid, 14) AS pk_14,
+ index_col(object_name(i.id), i.indid, 15) AS pk_15,
+ index_col(object_name(i.id), i.indid, 16) AS pk_16
+ FROM sysindexes i, sysobjects o
+ WHERE o.id = i.id
+ AND o.id = :table_id
+ AND (i.status & 2048) = 2048
+ AND i.indid BETWEEN 1 AND 254
+ """)
+
+ results = connection.execute(PK_SQL, table_id=table_id)
+ pks = results.fetchone()
+ results.close()
+
+ constrained_columns = []
+ if pks:
+ for i in range(1, pks["count"] + 1):
+ constrained_columns.append(pks["pk_%i" % (i,)])
+ return {"constrained_columns": constrained_columns,
+ "name": pks["name"]}
+ else:
+ return {"constrained_columns": [], "name": None}
+
+ @reflection.cache
+ def get_schema_names(self, connection, **kw):
+
+ SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
+
+ schemas = connection.execute(SCHEMA_SQL)
+
+ return [s["name"] for s in schemas]
+
+ @reflection.cache
+ def get_table_names(self, connection, schema=None, **kw):
+ if schema is None:
+ schema = self.default_schema_name
+
+ TABLE_SQL = text("""
+ SELECT o.name AS name
+ FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
+ WHERE u.name = :schema_name
+ AND o.type = 'U'
+ """)
+
+ if util.py2k:
+ if isinstance(schema, unicode):
+ schema = schema.encode("ascii")
+
+ tables = connection.execute(TABLE_SQL, schema_name=schema)
+
+ return [t["name"] for t in tables]
+
+ @reflection.cache
+ def get_view_definition(self, connection, view_name, schema=None, **kw):
+ if schema is None:
+ schema = self.default_schema_name
+
+ VIEW_DEF_SQL = text("""
+ SELECT c.text
+ FROM syscomments c JOIN sysobjects o ON c.id = o.id
+ WHERE o.name = :view_name
+ AND o.type = 'V'
+ """)
+
+ if util.py2k:
+ if isinstance(view_name, unicode):
+ view_name = view_name.encode("ascii")
+
+ view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
+
+ return view.scalar()
+
+ @reflection.cache
+ def get_view_names(self, connection, schema=None, **kw):
+ if schema is None:
+ schema = self.default_schema_name
+
+ VIEW_SQL = text("""
+ SELECT o.name AS name
+ FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
+ WHERE u.name = :schema_name
+ AND o.type = 'V'
+ """)
+
+ if util.py2k:
+ if isinstance(schema, unicode):
+ schema = schema.encode("ascii")
+ views = connection.execute(VIEW_SQL, schema_name=schema)
+
+ return [v["name"] for v in views]
+
+ def has_table(self, connection, table_name, schema=None):
+ try:
+ self.get_table_id(connection, table_name, schema)
+ except exc.NoSuchTableError:
+ return False
+ else:
+ return True
diff --git a/app/lib/sqlalchemy/dialects/sybase/mxodbc.py b/app/lib/sqlalchemy/dialects/sybase/mxodbc.py
new file mode 100644
index 0000000..1e77edc
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/mxodbc.py
@@ -0,0 +1,33 @@
+# sybase/mxodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+
+.. dialect:: sybase+mxodbc
+ :name: mxODBC
+ :dbapi: mxodbc
+    :connectstring: sybase+mxodbc://<username>:<password>@<dsnname>
+ :url: http://www.egenix.com/
+
+.. note::
+
+    This dialect is a stub only and is likely non-functional at this time.
+
+
+"""
+from sqlalchemy.dialects.sybase.base import SybaseDialect
+from sqlalchemy.dialects.sybase.base import SybaseExecutionContext
+from sqlalchemy.connectors.mxodbc import MxODBCConnector
+
+
+class SybaseExecutionContext_mxodbc(SybaseExecutionContext):
+ pass
+
+
+class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect):
+ execution_ctx_cls = SybaseExecutionContext_mxodbc
+
+dialect = SybaseDialect_mxodbc
diff --git a/app/lib/sqlalchemy/dialects/sybase/pyodbc.py b/app/lib/sqlalchemy/dialects/sybase/pyodbc.py
new file mode 100644
index 0000000..9690f49
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/pyodbc.py
@@ -0,0 +1,86 @@
+# sybase/pyodbc.py
+# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: sybase+pyodbc
+ :name: PyODBC
+ :dbapi: pyodbc
+    :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>\
+[/<database>]
+ :url: http://pypi.python.org/pypi/pyodbc/
+
+
+Unicode Support
+---------------
+
+The pyodbc driver currently supports usage of these Sybase types with
+Unicode or multibyte strings::
+
+ CHAR
+ NCHAR
+ NVARCHAR
+ TEXT
+ VARCHAR
+
+Currently *not* supported are::
+
+ UNICHAR
+ UNITEXT
+ UNIVARCHAR
+
+"""
+
+from sqlalchemy.dialects.sybase.base import SybaseDialect,\
+ SybaseExecutionContext
+from sqlalchemy.connectors.pyodbc import PyODBCConnector
+from sqlalchemy import types as sqltypes, processors
+import decimal
+
+
+class _SybNumeric_pyodbc(sqltypes.Numeric):
+ """Turns Decimals with adjusted() < -6 into floats.
+
+ It's not yet known how to get decimals with many
+ significant digits or very large adjusted() into Sybase
+ via pyodbc.
+
+ """
+
+ def bind_processor(self, dialect):
+ super_process = super(_SybNumeric_pyodbc, self).\
+ bind_processor(dialect)
+
+ def process(value):
+ if self.asdecimal and \
+ isinstance(value, decimal.Decimal):
+
+ if value.adjusted() < -6:
+ return processors.to_float(value)
+
+ if super_process:
+ return super_process(value)
+ else:
+ return value
+ return process
+
+
+class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
+ def set_ddl_autocommit(self, connection, value):
+ if value:
+ connection.autocommit = True
+ else:
+ connection.autocommit = False
+
+
+class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
+ execution_ctx_cls = SybaseExecutionContext_pyodbc
+
+ colspecs = {
+ sqltypes.Numeric: _SybNumeric_pyodbc,
+ }
+
+dialect = SybaseDialect_pyodbc
diff --git a/app/lib/sqlalchemy/dialects/sybase/pysybase.py b/app/lib/sqlalchemy/dialects/sybase/pysybase.py
new file mode 100644
index 0000000..00a7ca3
--- /dev/null
+++ b/app/lib/sqlalchemy/dialects/sybase/pysybase.py
@@ -0,0 +1,102 @@
+# sybase/pysybase.py
+# Copyright (C) 2010-2017 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+.. dialect:: sybase+pysybase
+ :name: Python-Sybase
+ :dbapi: Sybase
+    :connectstring: sybase+pysybase://<username>:<password>@<dsn>/[database name]